Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-03 20:02:54 +00:00
Compare commits: 64 commits (v0.4.0-nig… … v0.4.0-nig…)

| SHA1 |
|---|
| 6d64e1c296 |
| e6090a8d5b |
| b62e643e92 |
| 6f40128058 |
| 0b05c22be1 |
| 4fd1057764 |
| 6877d082f6 |
| 2dcc67769e |
| b9bac2b195 |
| 584acca09d |
| ad2021a8d8 |
| c970c206d1 |
| 5c19913a91 |
| 587a24e7fb |
| 0270708d6d |
| b7319fe2b1 |
| ea3708b33d |
| 7abe71f399 |
| b156225b80 |
| 2ac51c6348 |
| 7f5f8749da |
| d4e863882f |
| d18eb18b32 |
| aa6452c86c |
| d44cd9c6f5 |
| ce0f909cac |
| 4c693799d8 |
| 57836e762b |
| d927ab1ce5 |
| c39de9072f |
| 815a6d2d61 |
| f1f8a1d3a9 |
| e7abd00fc0 |
| 5e2fdec1b6 |
| 2d9ea595cb |
| 46fa3eb629 |
| 7d0d8dc6e3 |
| f8d152231d |
| c8cb1ef5bc |
| d5cadeeec3 |
| 7210b35d86 |
| cf7e8c9142 |
| cb4dd89754 |
| 9139962070 |
| 9718aa17c9 |
| 18896739d8 |
| 8bcad936d3 |
| 7efff2d704 |
| 93cd4ab89d |
| e5663a075f |
| ac81d3c74f |
| 7987e08ca2 |
| 1492700acc |
| 6f1094db0a |
| 21655cb56f |
| 5f0403c245 |
| d7002caca7 |
| dda922507f |
| fdd4929c8f |
| 90b2200cc8 |
| e3a079a142 |
| c55841988e |
| 279df2e558 |
| 7a27ef8d11 |
@@ -12,5 +12,9 @@ rustflags = [
    "-Wclippy::print_stdout",
    "-Wclippy::print_stderr",
    "-Wclippy::implicit_clone",
    "-Aclippy::items_after_test_module",

    # It seems clippy has made a false positive decision here when upgrading rust toolchain to
    # nightly-2023-08-07, we do need it to be borrowed mutably.
    # Allow it for now; try disallow it when the toolchain is upgraded in the future.
    "-Aclippy::needless_pass_by_ref_mut",
]
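For reference, the lint set above could also be expressed on the clippy command line. This is only an illustrative sketch of what the rustflags amount to; the repository applies them through the cargo config rather than invoking clippy this way.

```bash
# Hypothetical CLI equivalent of the rustflags above (the repo relies on cargo config instead).
cargo clippy --workspace --all-targets -- \
  -W clippy::print_stdout \
  -W clippy::print_stderr \
  -W clippy::implicit_clone \
  -A clippy::items_after_test_module \
  -A clippy::needless_pass_by_ref_mut
```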
@@ -1,2 +1,3 @@
[profile.default]
slow-timeout = { period = "60s", terminate-after = 3, grace-period = "30s" }
retries = { backoff = "exponential", count = 3, delay = "10s", jitter = true }
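A minimal usage sketch, assuming this is the nextest configuration file that cargo-nextest loads automatically: the retry and slow-timeout settings above then apply to an ordinary run of the default profile.

```bash
# Illustrative only: nextest picks up its config file automatically,
# so a plain run of the default profile uses the retries/slow-timeout above.
cargo nextest run --profile default
```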
@@ -8,8 +8,8 @@ inputs:
  dockerhub-image-registry-username:
    description: The dockerhub username to login to the image registry
    required: true
  dockerhub-image-registry-password:
    description: The dockerhub password to login to the image registry
  dockerhub-image-registry-token:
    description: The dockerhub token to login to the image registry
    required: true
  dockerhub-image-namespace:
    description: The dockerhub namespace of the image registry to store the images
@@ -40,7 +40,7 @@ runs:
      with:
        registry: ${{ inputs.dockerhub-image-registry }}
        username: ${{ inputs.dockerhub-image-registry-username }}
        password: ${{ inputs.dockerhub-image-registry-password }}
        password: ${{ inputs.dockerhub-image-registry-token }}

    - name: Build and push dev builder image to dockerhub
      shell: bash
.github/actions/build-greptime-binary/action.yml (vendored, 11 lines changed)

@@ -28,12 +28,21 @@ inputs:
  aws-region:
    description: AWS region
    required: true
  upload-to-s3:
    description: Upload to S3
    required: false
    default: 'true'
  working-dir:
    description: Working directory to build the artifacts
    required: false
    default: .
runs:
  using: composite
  steps:
    - name: Build greptime binary
      shell: bash
      run: |
        cd ${{ inputs.working-dir }} && \
        make build-greptime-by-buildx \
          CARGO_PROFILE=${{ inputs.cargo-profile }} \
          FEATURES=${{ inputs.features }} \
@@ -49,3 +58,5 @@ runs:
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}
        upload-to-s3: ${{ inputs.upload-to-s3 }}
        working-dir: ${{ inputs.working-dir }}
.github/actions/build-greptime-images/action.yml (vendored, 16 lines changed)

@@ -32,6 +32,10 @@ inputs:
  platforms:
    description: The supported platforms to build the image
    required: true
  push-latest-tag:
    description: Whether to push the latest tag
    required: false
    default: 'true'
runs:
  using: composite
  steps:
@@ -76,7 +80,19 @@ runs:
        rm -rf arm64 && \
        mv ${{ inputs.arm64-artifact-name }} arm64

    - name: Build and push images(without latest) for amd64 and arm64
      if: ${{ inputs.push-latest-tag == 'false' }}
      uses: docker/build-push-action@v3
      with:
        context: .
        file: ${{ inputs.docker-file }}
        push: true
        platforms: ${{ inputs.platforms }}
        tags: |
          ${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.image-tag }}

    - name: Build and push images for amd64 and arm64
      if: ${{ inputs.push-latest-tag == 'true' }}
      uses: docker/build-push-action@v3
      with:
        context: .
.github/actions/build-images/action.yml (vendored, 21 lines changed)

@@ -7,6 +7,10 @@ inputs:
  image-namespace:
    description: The namespace of the image registry to store the images
    required: true
  image-name:
    description: The name of the image to build
    required: false
    default: greptimedb
  image-registry-username:
    description: The username to login to the image registry
    required: true
@@ -16,32 +20,43 @@ inputs:
  version:
    description: Version of the artifact
    required: true
  push-latest-tag:
    description: Whether to push the latest tag
    required: false
    default: 'true'
  dev-mode:
    description: Enable dev mode, only build standard greptime
    required: false
    default: 'false'
runs:
  using: composite
  steps:
    - name: Build and push standard images to dockerhub
      uses: ./.github/actions/build-greptime-images
      with:
      with: # The image will be used as '${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }}'
        image-registry: ${{ inputs.image-registry }}
        image-namespace: ${{ inputs.image-namespace }}
        image-registry-username: ${{ inputs.image-registry-username }}
        image-registry-password: ${{ inputs.image-registry-password }}
        image-name: greptimedb
        image-name: ${{ inputs.image-name }}
        image-tag: ${{ inputs.version }}
        docker-file: docker/ci/Dockerfile
        amd64-artifact-name: greptime-linux-amd64-pyo3-${{ inputs.version }}
        arm64-artifact-name: greptime-linux-arm64-pyo3-${{ inputs.version }}
        platforms: linux/amd64,linux/arm64
        push-latest-tag: ${{ inputs.push-latest-tag }}

    - name: Build and push centos images to dockerhub
      if: ${{ inputs.dev-mode == 'false' }}
      uses: ./.github/actions/build-greptime-images
      with:
        image-registry: ${{ inputs.image-registry }}
        image-namespace: ${{ inputs.image-namespace }}
        image-registry-username: ${{ inputs.image-registry-username }}
        image-registry-password: ${{ inputs.image-registry-password }}
        image-name: greptimedb-centos
        image-name: ${{ inputs.image-name }}-centos
        image-tag: ${{ inputs.version }}
        docker-file: docker/ci/Dockerfile-centos
        amd64-artifact-name: greptime-linux-amd64-centos-${{ inputs.version }}
        platforms: linux/amd64
        push-latest-tag: ${{ inputs.push-latest-tag }}
.github/actions/build-linux-artifacts/action.yml (vendored, 22 lines changed)

@@ -25,6 +25,18 @@ inputs:
  aws-region:
    description: AWS region
    required: true
  dev-mode:
    description: Enable dev mode, only build standard greptime
    required: false
    default: 'false'
  upload-to-s3:
    description: Upload to S3
    required: false
    default: 'true'
  working-dir:
    description: Working directory to build the artifacts
    required: false
    default: .
runs:
  using: composite
  steps:
@@ -33,6 +45,7 @@ runs:
      shell: bash
      # NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance.
      run: |
        cd ${{ inputs.working-dir }} && \
        make run-it-in-container BUILD_JOBS=4

    - name: Upload sqlness logs
@@ -55,8 +68,11 @@ runs:
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}
        upload-to-s3: ${{ inputs.upload-to-s3 }}
        working-dir: ${{ inputs.working-dir }}

    - name: Build greptime without pyo3
      if: ${{ inputs.dev-mode == 'false' }}
      uses: ./.github/actions/build-greptime-binary
      with:
        base-image: ubuntu
@@ -68,10 +84,12 @@ runs:
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}
        upload-to-s3: ${{ inputs.upload-to-s3 }}
        working-dir: ${{ inputs.working-dir }}

    - name: Build greptime on centos base image
      uses: ./.github/actions/build-greptime-binary
      if: ${{ inputs.arch == 'amd64' }} # Only build centos7 base image for amd64.
      if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build centos7 base image for amd64.
      with:
        base-image: centos
        features: servers/dashboard
@@ -82,3 +100,5 @@ runs:
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}
        upload-to-s3: ${{ inputs.upload-to-s3 }}
        working-dir: ${{ inputs.working-dir }}
@@ -72,7 +72,7 @@ runs:
      uses: taiki-e/install-action@nextest

    - name: Run integration tests
      if: ${{ inputs.disable_run_tests == 'false' }}
      if: ${{ inputs.disable-run-tests == 'false' }}
      shell: bash
      run: |
        make test sqlness-test
.github/actions/upload-artifacts/action.yml (vendored, 52 lines changed)

@@ -22,10 +22,19 @@ inputs:
  aws-region:
    description: AWS region
    required: true
  upload-to-s3:
    description: Upload to S3
    required: false
    default: 'true'
  working-dir:
    description: Working directory to upload the artifacts
    required: false
    default: .
runs:
  using: composite
  steps:
    - name: Create artifacts directory
      working-directory: ${{ inputs.working-dir }}
      shell: bash
      run: |
        mkdir -p ${{ inputs.artifacts-dir }} && \
@@ -37,6 +46,7 @@ runs:
    # greptime-linux-amd64-pyo3-v0.3.0
    # └── greptime
    - name: Compress artifacts and calculate checksum
      working-directory: ${{ inputs.working-dir }}
      shell: bash
      run: |
        tar -zcvf ${{ inputs.artifacts-dir }}.tar.gz ${{ inputs.artifacts-dir }} && \
@@ -48,15 +58,16 @@ runs:
      uses: actions/upload-artifact@v3
      with:
        name: ${{ inputs.artifacts-dir }}
        path: ${{ inputs.artifacts-dir }}.tar.gz
        path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.tar.gz

    - name: Upload checksum
      uses: actions/upload-artifact@v3
      with:
        name: ${{ inputs.artifacts-dir }}.sha256sum
        path: ${{ inputs.artifacts-dir }}.sha256sum
        path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.sha256sum

    - name: Configure AWS credentials
      if: ${{ inputs.upload-to-s3 == 'true' }}
      uses: aws-actions/configure-aws-credentials@v2
      with:
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
@@ -64,19 +75,24 @@ runs:
        aws-region: ${{ inputs.aws-region }}

    - name: Upload artifacts to S3
      shell: bash
      # The bucket layout will be:
      # releases/greptimedb
      # ├── v0.1.0
      # │   ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
      # │   └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
      # └── v0.2.0
      #     ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
      #     └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
      run: |
        aws s3 cp \
          ${{ inputs.artifacts-dir }}.tar.gz \
          s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.tar.gz && \
        aws s3 cp \
          ${{ inputs.artifacts-dir }}.sha256sum \
          s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.sha256sum
      if: ${{ inputs.upload-to-s3 == 'true' }}
      uses: nick-invision/retry@v2
      with:
        max_attempts: 20
        timeout_minutes: 5
        # The bucket layout will be:
        # releases/greptimedb
        # ├── v0.1.0
        # │   ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
        # │   └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
        # └── v0.2.0
        #     ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
        #     └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
        command: |
          cd ${{ inputs.working-dir }} && \
          aws s3 cp \
            ${{ inputs.artifacts-dir }}.tar.gz \
            s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.tar.gz && \
          aws s3 cp \
            ${{ inputs.artifacts-dir }}.sha256sum \
            s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.sha256sum
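The change above moves the `aws s3 cp` calls inside nick-invision/retry so transient failures are retried up to 20 times. A rough shell equivalent of that behaviour is sketched below; ARTIFACTS_DIR, BUCKET and VERSION stand in for the action inputs, and the per-attempt timeout the retry action enforces is omitted.

```bash
# Illustrative sketch only, not the retry action's implementation.
for attempt in $(seq 1 20); do
  if aws s3 cp "${ARTIFACTS_DIR}.tar.gz" \
       "s3://${BUCKET}/releases/greptimedb/${VERSION}/${ARTIFACTS_DIR}.tar.gz" && \
     aws s3 cp "${ARTIFACTS_DIR}.sha256sum" \
       "s3://${BUCKET}/releases/greptimedb/${VERSION}/${ARTIFACTS_DIR}.sha256sum"; then
    break  # both uploads succeeded
  fi
  echo "Upload failed (attempt ${attempt}), retrying..." >&2
  sleep 10
done
```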
.github/scripts/create-version.sh (vendored, 32 lines changed)

@@ -3,8 +3,9 @@
set -e

# - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
# - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-$(git rev-parse --short HEAD)-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like 'v0.2.0-nightly-20230313';
# - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-$(git rev-parse --short HEAD)-YYYYMMDDSS', like 'v0.2.0-e5b243c-2023071245';
# - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
# create_version ${GIHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
function create_version() {
  # Read from envrionment variables.
@@ -23,6 +24,24 @@ function create_version() {
    exit 1
  fi

  # Reuse $NEXT_RELEASE_VERSION to identify whether it's a nightly build.
  # It will be like 'nigtly-20230808-7d0d8dc6'.
  if [ "$NEXT_RELEASE_VERSION" = nightly ]; then
    echo "$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")-$(git rev-parse --short HEAD)"
    exit 0
  fi

  # Reuse $NEXT_RELEASE_VERSION to identify whether it's a dev build.
  # It will be like 'dev-2023080819-f0e7216c'.
  if [ "$NEXT_RELEASE_VERSION" = dev ]; then
    if [ -z "$COMMIT_SHA" ]; then
      echo "COMMIT_SHA is empty in dev build"
      exit 1
    fi
    echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)"
    exit 0
  fi

  # Note: Only output 'version=xxx' to stdout when everything is ok, so that it can be used in GitHub Actions Outputs.
  if [ "$GITHUB_EVENT_NAME" = push ]; then
    if [ -z "$GITHUB_REF_NAME" ]; then
@@ -31,7 +50,7 @@ function create_version() {
    fi
    echo "$GITHUB_REF_NAME"
  elif [ "$GITHUB_EVENT_NAME" = workflow_dispatch ]; then
    echo "$NEXT_RELEASE_VERSION-$(git rev-parse --short HEAD)-$(date "+%Y%m%d%S")"
    echo "$NEXT_RELEASE_VERSION-$(git rev-parse --short HEAD)-$(date "+%Y%m%d-%s")"
  elif [ "$GITHUB_EVENT_NAME" = schedule ]; then
    echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")"
  else
@@ -40,5 +59,10 @@ function create_version() {
  fi
}

# You can run as: GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigthly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
# You can run as following examples:
# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
create_version
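To make the two new branches concrete, the following sketch walks them with a made-up date (2023-08-08) and made-up SHAs; the output formats follow directly from the `echo` statements added above.

```bash
# Nightly build on 2023-08-08 with HEAD at 7d0d8dc6 (hypothetical values):
GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly \
  ./create-version.sh   # -> nightly-20230808-7d0d8dc6

# Dev build for an explicit commit; the middle part is date "+%Y%m%d-%s":
GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nightly \
  COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a \
  ./create-version.sh   # -> dev-20230808-<epoch-seconds>-f0e7216c
```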
.github/workflows/apidoc.yml (vendored, 2 lines changed)

@@ -13,7 +13,7 @@ on:
name: Build API docs

env:
  RUST_TOOLCHAIN: nightly-2023-05-03
  RUST_TOOLCHAIN: nightly-2023-08-07

jobs:
  apidoc:
.github/workflows/dev-build.yml (vendored, new file, 344 lines)

@@ -0,0 +1,344 @@
# Development build only build the debug version of the artifacts manually.
name: GreptimeDB Development Build

on:
  workflow_dispatch: # Allows you to run this workflow manually.
    inputs:
      repository:
        description: The public repository to build
        required: false
        default: GreptimeTeam/greptimedb
      commit: # Note: We only pull the source code and use the current workflow to build the artifacts.
        description: The commit to build
        required: true
      linux_amd64_runner:
        type: choice
        description: The runner uses to build linux-amd64 artifacts
        default: ec2-c6i.4xlarge-amd64
        options:
          - ubuntu-latest
          - ubuntu-latest-8-cores
          - ubuntu-latest-16-cores
          - ubuntu-latest-32-cores
          - ubuntu-latest-64-cores
          - ec2-c6i.xlarge-amd64 # 4C8G
          - ec2-c6i.2xlarge-amd64 # 8C16G
          - ec2-c6i.4xlarge-amd64 # 16C32G
          - ec2-c6i.8xlarge-amd64 # 32C64G
          - ec2-c6i.16xlarge-amd64 # 64C128G
      linux_arm64_runner:
        type: choice
        description: The runner uses to build linux-arm64 artifacts
        default: ec2-c6g.4xlarge-arm64
        options:
          - ec2-c6g.xlarge-arm64 # 4C8G
          - ec2-c6g.2xlarge-arm64 # 8C16G
          - ec2-c6g.4xlarge-arm64 # 16C32G
          - ec2-c6g.8xlarge-arm64 # 32C64G
          - ec2-c6g.16xlarge-arm64 # 64C128G
      skip_test:
        description: Do not run integration tests during the build
        type: boolean
        default: true
      build_linux_amd64_artifacts:
        type: boolean
        description: Build linux-amd64 artifacts
        required: false
        default: true
      build_linux_arm64_artifacts:
        type: boolean
        description: Build linux-arm64 artifacts
        required: false
        default: true
      release_images:
        type: boolean
        description: Build and push images to DockerHub and ACR
        required: false
        default: true

# Use env variables to control all the release process.
env:
  CARGO_PROFILE: nightly

  # Controls whether to run tests, include unit-test, integration-test and sqlness.
  DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}

  # Always use 'dev' to indicate it's the dev build.
  NEXT_RELEASE_VERSION: dev

  NIGHTLY_RELEASE_PREFIX: nightly

  # Use the different image name to avoid conflict with the release images.
  IMAGE_NAME: greptimedb-dev

  # The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
  CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb

jobs:
  allocate-runners:
    name: Allocate runners
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: ubuntu-latest
    outputs:
      linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
      linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}

      # The following EC2 resource id will be used for resource releasing.
      linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
      linux-amd64-ec2-runner-instance-id: ${{ steps.start-linux-amd64-runner.outputs.ec2-instance-id }}
      linux-arm64-ec2-runner-label: ${{ steps.start-linux-arm64-runner.outputs.label }}
      linux-arm64-ec2-runner-instance-id: ${{ steps.start-linux-arm64-runner.outputs.ec2-instance-id }}

      # The 'version' use as the global tag name of the release workflow.
      version: ${{ steps.create-version.outputs.version }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Create version
        id: create-version
        run: |
          version=$(./.github/scripts/create-version.sh) && \
          echo $version && \
          echo "version=$version" >> $GITHUB_OUTPUT
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_REF_NAME: ${{ github.ref_name }}
          COMMIT_SHA: ${{ inputs.commit }}
          NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
          NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}

      - name: Allocate linux-amd64 runner
        if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
        uses: ./.github/actions/start-runner
        id: start-linux-amd64-runner
        with:
          runner: ${{ inputs.linux_amd64_runner || vars.DEFAULT_AMD64_RUNNER }}
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.EC2_RUNNER_REGION }}
          github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
          image-id: ${{ vars.EC2_RUNNER_LINUX_AMD64_IMAGE_ID }}
          security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
          subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}

      - name: Allocate linux-arm64 runner
        if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
        uses: ./.github/actions/start-runner
        id: start-linux-arm64-runner
        with:
          runner: ${{ inputs.linux_arm64_runner || vars.DEFAULT_ARM64_RUNNER }}
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.EC2_RUNNER_REGION }}
          github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
          image-id: ${{ vars.EC2_RUNNER_LINUX_ARM64_IMAGE_ID }}
          security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
          subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}

  build-linux-amd64-artifacts:
    name: Build linux-amd64 artifacts
    if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
    needs: [
      allocate-runners,
    ]
    runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Checkout greptimedb
        uses: actions/checkout@v3
        with:
          repository: ${{ inputs.repository }}
          ref: ${{ inputs.commit }}
          path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}

      - uses: ./.github/actions/build-linux-artifacts
        with:
          arch: amd64
          cargo-profile: ${{ env.CARGO_PROFILE }}
          version: ${{ needs.allocate-runners.outputs.version }}
          disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
          release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
          aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
          dev-mode: true # Only build the standard greptime binary.
          upload-to-s3: false # No need to upload to S3.
          working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}

  build-linux-arm64-artifacts:
    name: Build linux-arm64 artifacts
    if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
    needs: [
      allocate-runners,
    ]
    runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Checkout greptimedb
        uses: actions/checkout@v3
        with:
          repository: ${{ inputs.repository }}
          ref: ${{ inputs.commit }}
          path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}

      - uses: ./.github/actions/build-linux-artifacts
        with:
          arch: arm64
          cargo-profile: ${{ env.CARGO_PROFILE }}
          version: ${{ needs.allocate-runners.outputs.version }}
          disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
          release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
          aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
          dev-mode: true # Only build the standard greptime binary.
          upload-to-s3: false # No need to upload to S3.
          working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}

  release-images-to-dockerhub:
    name: Build and push images to DockerHub
    if: ${{ inputs.release_images || github.event_name == 'schedule' }}
    needs: [
      allocate-runners,
      build-linux-amd64-artifacts,
      build-linux-arm64-artifacts,
    ]
    runs-on: ubuntu-latest
    outputs:
      build-result: ${{ steps.set-build-result.outputs.build-result }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Build and push images to dockerhub
        uses: ./.github/actions/build-images
        with:
          image-registry: docker.io
          image-namespace: ${{ vars.IMAGE_NAMESPACE }}
          image-name: ${{ env.IMAGE_NAME }}
          image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
          image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
          version: ${{ needs.allocate-runners.outputs.version }}
          push-latest-tag: false # Don't push the latest tag to registry.
          dev-mode: true # Only build the standard images.

      - name: Set build result
        id: set-build-result
        run: |
          echo "build-result=success" >> $GITHUB_OUTPUT

  release-images-to-acr:
    name: Build and push images to ACR
    if: ${{ inputs.release_images || github.event_name == 'schedule' }}
    needs: [
      allocate-runners,
      build-linux-amd64-artifacts,
      build-linux-arm64-artifacts,
    ]
    runs-on: ubuntu-latest
    # When we push to ACR, it's easy to fail due to some unknown network issues.
    # However, we don't want to fail the whole workflow because of this.
    # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
    continue-on-error: true
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Build and push images to ACR
        uses: ./.github/actions/build-images
        with:
          image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
          image-namespace: ${{ vars.IMAGE_NAMESPACE }}
          image-name: ${{ env.IMAGE_NAME }}
          image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
          image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
          version: ${{ needs.allocate-runners.outputs.version }}
          push-latest-tag: false # Don't push the latest tag to registry.
          dev-mode: true # Only build the standard images.

  stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
    name: Stop linux-amd64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
    runs-on: ubuntu-latest
    needs: [
      allocate-runners,
      build-linux-amd64-artifacts,
    ]
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Stop EC2 runner
        uses: ./.github/actions/stop-runner
        with:
          label: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-label }}
          ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-instance-id }}
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.EC2_RUNNER_REGION }}
          github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

  stop-linux-arm64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
    name: Stop linux-arm64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
    runs-on: ubuntu-latest
    needs: [
      allocate-runners,
      build-linux-arm64-artifacts,
    ]
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Stop EC2 runner
        uses: ./.github/actions/stop-runner
        with:
          label: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-label }}
          ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-instance-id }}
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.EC2_RUNNER_REGION }}
          github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

  notification:
    if: ${{ always() }} # Not requiring successful dependent jobs, always run.
    name: Send notification to Greptime team
    needs: [
      release-images-to-dockerhub
    ]
    runs-on: ubuntu-latest
    env:
      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
    steps:
      - name: Notifiy nightly build successful result
        uses: slackapi/slack-github-action@v1.23.0
        if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
        with:
          payload: |
            {"text": "GreptimeDB ${{ env.NEXT_RELEASE_VERSION }} build successful"}

      - name: Notifiy nightly build failed result
        uses: slackapi/slack-github-action@v1.23.0
        if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }}
        with:
          payload: |
            {"text": "GreptimeDB ${{ env.NEXT_RELEASE_VERSION }} build failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'"}
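Since the workflow above is a pure workflow_dispatch trigger, one possible way to kick it off is with the GitHub CLI; this is only a usage sketch, the input names come from the workflow itself and the commit value is a placeholder.

```bash
# Hypothetical manual trigger of the dev build via the GitHub CLI.
gh workflow run dev-build.yml \
  --repo GreptimeTeam/greptimedb \
  -f repository=GreptimeTeam/greptimedb \
  -f commit=<full-commit-sha> \
  -f skip_test=true \
  -f release_images=true
```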
.github/workflows/develop.yml (vendored, 112 lines changed)

@@ -25,7 +25,7 @@ on:
name: CI

env:
  RUST_TOOLCHAIN: nightly-2023-05-03
  RUST_TOOLCHAIN: nightly-2023-08-07

jobs:
  typos:
@@ -51,7 +51,7 @@ jobs:
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Run cargo check
        run: cargo check --workspace --all-targets
        run: cargo check --locked --workspace --all-targets

  toml:
    name: Toml Check
@@ -62,60 +62,21 @@ jobs:
      - uses: actions/checkout@v3
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
          toolchain: stable
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Install taplo
        run: cargo install taplo-cli --version ^0.8 --locked
        run: cargo +stable install taplo-cli --version ^0.8 --locked
      - name: Run taplo
        run: taplo format --check --option "indent_string=    "

  # Use coverage to run test.
  # test:
  #   name: Test Suite
  #   if: github.event.pull_request.draft == false
  #   runs-on: ubuntu-latest
  #   timeout-minutes: 60
  #   steps:
  #   - uses: actions/checkout@v3
  #   - name: Cache LLVM and Clang
  #     id: cache-llvm
  #     uses: actions/cache@v3
  #     with:
  #       path: ./llvm
  #       key: llvm
  #   - uses: arduino/setup-protoc@v1
  #     with:
  #       repo-token: ${{ secrets.GITHUB_TOKEN }}
  #   - uses: KyleMayes/install-llvm-action@v1
  #     with:
  #       version: "14.0"
  #       cached: ${{ steps.cache-llvm.outputs.cache-hit }}
  #   - uses: dtolnay/rust-toolchain@master
  #     with:
  #       toolchain: ${{ env.RUST_TOOLCHAIN }}
  #   - name: Rust Cache
  #     uses: Swatinem/rust-cache@v2
  #   - name: Cleanup disk
  #     uses: curoky/cleanup-disk-action@v2.0
  #     with:
  #       retain: 'rust,llvm'
  #   - name: Install latest nextest release
  #     uses: taiki-e/install-action@nextest
  #   - name: Run tests
  #     run: cargo nextest run
  #     env:
  #       CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
  #       RUST_BACKTRACE: 1
  #       GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
  #       GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
  #       GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
  #       UNITTEST_LOG_DIR: "__unittest_logs"
        run: taplo format --check

  sqlness:
    name: Sqlness Test
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest-8-cores
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ ubuntu-latest-8-cores, windows-latest-8-cores ]
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
@@ -127,25 +88,14 @@ jobs:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Run etcd
        run: |
          ETCD_VER=v3.5.7
          DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
          curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
          mkdir -p /tmp/etcd-download
          tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
          rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz

          sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
          nohup etcd >/tmp/etcd.log 2>&1 &
      - name: Run sqlness
        run: cargo sqlness && ls /tmp
        run: cargo sqlness
      - name: Upload sqlness logs
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: sqlness-logs
          path: /tmp/greptime-*.log
          path: ${{ runner.temp }}/greptime-*.log
          retention-days: 3

  fmt:
@@ -234,3 +184,43 @@ jobs:
          flags: rust
          fail_ci_if_error: false
          verbose: true

  test-on-windows:
    if: github.event.pull_request.draft == false
    runs-on: windows-latest-8-cores
    timeout-minutes: 60
    steps:
      - run: git config --global core.autocrlf false
      - uses: actions/checkout@v3
      - uses: arduino/setup-protoc@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
          components: llvm-tools-preview
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Install Cargo Nextest
        uses: taiki-e/install-action@nextest
      - name: Install Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install PyArrow Package
        run: pip install pyarrow
      - name: Install WSL distribution
        uses: Vampire/setup-wsl@v2
        with:
          distribution: Ubuntu-22.04
      - name: Running tests
        run: cargo nextest run -F pyo3_backend,dashboard
        env:
          RUST_BACKTRACE: 1
          CARGO_INCREMENTAL: 0
          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
          GT_S3_REGION: ${{ secrets.S3_REGION }}
          UNITTEST_LOG_DIR: "__unittest_logs"
.github/workflows/nightly-build.yml (vendored, new file, 309 lines)

@@ -0,0 +1,309 @@
# Nightly build only do the following things:
# 1. Run integration tests;
# 2. Build binaries and images for linux-amd64 and linux-arm64 platform;
name: GreptimeDB Nightly Build

on:
  schedule:
    # Trigger at 00:00(UTC) on every day-of-week from Monday through Friday.
    - cron: '0 0 * * 1-5'
  workflow_dispatch: # Allows you to run this workflow manually.
    inputs:
      linux_amd64_runner:
        type: choice
        description: The runner uses to build linux-amd64 artifacts
        default: ec2-c6i.2xlarge-amd64
        options:
          - ubuntu-latest
          - ubuntu-latest-8-cores
          - ubuntu-latest-16-cores
          - ubuntu-latest-32-cores
          - ubuntu-latest-64-cores
          - ec2-c6i.xlarge-amd64 # 4C8G
          - ec2-c6i.2xlarge-amd64 # 8C16G
          - ec2-c6i.4xlarge-amd64 # 16C32G
          - ec2-c6i.8xlarge-amd64 # 32C64G
          - ec2-c6i.16xlarge-amd64 # 64C128G
      linux_arm64_runner:
        type: choice
        description: The runner uses to build linux-arm64 artifacts
        default: ec2-c6g.2xlarge-arm64
        options:
          - ec2-c6g.xlarge-arm64 # 4C8G
          - ec2-c6g.2xlarge-arm64 # 8C16G
          - ec2-c6g.4xlarge-arm64 # 16C32G
          - ec2-c6g.8xlarge-arm64 # 32C64G
          - ec2-c6g.16xlarge-arm64 # 64C128G
      skip_test:
        description: Do not run integration tests during the build
        type: boolean
        default: true
      build_linux_amd64_artifacts:
        type: boolean
        description: Build linux-amd64 artifacts
        required: false
        default: false
      build_linux_arm64_artifacts:
        type: boolean
        description: Build linux-arm64 artifacts
        required: false
        default: false
      release_images:
        type: boolean
        description: Build and push images to DockerHub and ACR
        required: false
        default: false

# Use env variables to control all the release process.
env:
  CARGO_PROFILE: nightly

  # Controls whether to run tests, include unit-test, integration-test and sqlness.
  DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}

  # Always use 'nightly' to indicate it's the nightly build.
  NEXT_RELEASE_VERSION: nightly

  NIGHTLY_RELEASE_PREFIX: nightly

jobs:
  allocate-runners:
    name: Allocate runners
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: ubuntu-latest
    outputs:
      linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
      linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}

      # The following EC2 resource id will be used for resource releasing.
      linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
      linux-amd64-ec2-runner-instance-id: ${{ steps.start-linux-amd64-runner.outputs.ec2-instance-id }}
      linux-arm64-ec2-runner-label: ${{ steps.start-linux-arm64-runner.outputs.label }}
      linux-arm64-ec2-runner-instance-id: ${{ steps.start-linux-arm64-runner.outputs.ec2-instance-id }}

      # The 'version' use as the global tag name of the release workflow.
      version: ${{ steps.create-version.outputs.version }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Create version
        id: create-version
        run: |
          version=$(./.github/scripts/create-version.sh) && \
          echo $version && \
          echo "version=$version" >> $GITHUB_OUTPUT
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_REF_NAME: ${{ github.ref_name }}
          NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
          NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}

      - name: Allocate linux-amd64 runner
        if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
        uses: ./.github/actions/start-runner
        id: start-linux-amd64-runner
        with:
          runner: ${{ inputs.linux_amd64_runner || vars.DEFAULT_AMD64_RUNNER }}
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.EC2_RUNNER_REGION }}
          github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
          image-id: ${{ vars.EC2_RUNNER_LINUX_AMD64_IMAGE_ID }}
          security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
          subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}

      - name: Allocate linux-arm64 runner
        if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
        uses: ./.github/actions/start-runner
        id: start-linux-arm64-runner
        with:
          runner: ${{ inputs.linux_arm64_runner || vars.DEFAULT_ARM64_RUNNER }}
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.EC2_RUNNER_REGION }}
          github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
          image-id: ${{ vars.EC2_RUNNER_LINUX_ARM64_IMAGE_ID }}
          security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
          subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}

  build-linux-amd64-artifacts:
    name: Build linux-amd64 artifacts
    if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
    needs: [
      allocate-runners,
    ]
    runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - uses: ./.github/actions/build-linux-artifacts
        with:
          arch: amd64
          cargo-profile: ${{ env.CARGO_PROFILE }}
          version: ${{ needs.allocate-runners.outputs.version }}
          disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
          release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
          aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}

  build-linux-arm64-artifacts:
    name: Build linux-arm64 artifacts
    if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
    needs: [
      allocate-runners,
    ]
    runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - uses: ./.github/actions/build-linux-artifacts
        with:
          arch: arm64
          cargo-profile: ${{ env.CARGO_PROFILE }}
          version: ${{ needs.allocate-runners.outputs.version }}
          disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
          release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
          aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}

  release-images-to-dockerhub:
    name: Build and push images to DockerHub
    if: ${{ inputs.release_images || github.event_name == 'schedule' }}
    needs: [
      allocate-runners,
      build-linux-amd64-artifacts,
      build-linux-arm64-artifacts,
    ]
    runs-on: ubuntu-latest
    outputs:
      nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Build and push images to dockerhub
        uses: ./.github/actions/build-images
        with:
          image-registry: docker.io
          image-namespace: ${{ vars.IMAGE_NAMESPACE }}
          image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
          image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
          version: ${{ needs.allocate-runners.outputs.version }}
          push-latest-tag: false # Don't push the latest tag to registry.

      - name: Set nightly build result
        id: set-nightly-build-result
        run: |
          echo "nightly-build-result=success" >> $GITHUB_OUTPUT

  release-images-to-acr:
    name: Build and push images to ACR
    if: ${{ inputs.release_images || github.event_name == 'schedule' }}
    needs: [
      allocate-runners,
      build-linux-amd64-artifacts,
      build-linux-arm64-artifacts,
    ]
    runs-on: ubuntu-latest
    # When we push to ACR, it's easy to fail due to some unknown network issues.
    # However, we don't want to fail the whole workflow because of this.
    # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
    continue-on-error: true
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Build and push images to ACR
        uses: ./.github/actions/build-images
        with:
          image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
          image-namespace: ${{ vars.IMAGE_NAMESPACE }}
          image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
          image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
          version: ${{ needs.allocate-runners.outputs.version }}
          push-latest-tag: false # Don't push the latest tag to registry.

  stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
    name: Stop linux-amd64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
    runs-on: ubuntu-latest
    needs: [
      allocate-runners,
      build-linux-amd64-artifacts,
    ]
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Stop EC2 runner
        uses: ./.github/actions/stop-runner
        with:
          label: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-label }}
          ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-instance-id }}
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.EC2_RUNNER_REGION }}
          github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

  stop-linux-arm64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
    name: Stop linux-arm64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
    runs-on: ubuntu-latest
    needs: [
      allocate-runners,
      build-linux-arm64-artifacts,
    ]
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Stop EC2 runner
        uses: ./.github/actions/stop-runner
        with:
          label: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-label }}
          ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-instance-id }}
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.EC2_RUNNER_REGION }}
          github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

  notification:
    if: ${{ always() }} # Not requiring successful dependent jobs, always run.
    name: Send notification to Greptime team
    needs: [
      release-images-to-dockerhub
    ]
    runs-on: ubuntu-latest
    env:
      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
    steps:
      - name: Notifiy nightly build successful result
        uses: slackapi/slack-github-action@v1.23.0
        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
        with:
          payload: |
            {"text": "GreptimeDB nightly build successful"}

      - name: Notifiy nightly build failed result
        uses: slackapi/slack-github-action@v1.23.0
        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
        with:
          payload: |
            {"text": "GreptimeDB nightly build failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/nightly-build.yml'"}
.github/workflows/release.yml (vendored, 4 lines changed)

@@ -82,7 +82,7 @@ on:
# Use env variables to control all the release process.
env:
  # The arguments of building greptime.
  RUST_TOOLCHAIN: nightly-2023-05-03
  RUST_TOOLCHAIN: nightly-2023-08-07
  CARGO_PROFILE: nightly

  # Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -336,7 +336,7 @@ jobs:
        uses: ./.github/actions/build-dev-builder-image
        with:
          dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
          dockerhub-image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
          dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
          acr-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
          acr-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
          acr-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
Cargo.lock (generated, 335 lines changed)

@@ -211,7 +211,7 @@ dependencies = [
 "common-error",
 "common-time",
 "datatypes",
 "greptime-proto",
 "greptime-proto 0.1.0 (git+https://github.com/GreptimeTeam/greptime-proto.git?rev=940694cfd05c1e93c1dd7aab486184c9e2853098)",
 "prost",
 "snafu",
 "tonic 0.9.2",
@@ -684,6 +684,15 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"

[[package]]
name = "autotools"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aef8da1805e028a172334c3b680f93e71126f2327622faef2ec3d893c0a4ad77"
dependencies = [
 "cc",
]

[[package]]
name = "axum"
version = "0.6.19"
@@ -740,7 +749,7 @@ version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdca6a10ecad987bda04e95606ef85a5417dcaac1a78455242d72e031e2b6b62"
dependencies = [
 "heck",
 "heck 0.4.1",
 "proc-macro2",
 "quote",
 "syn 2.0.28",
@@ -1447,7 +1456,7 @@ version = "3.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008"
dependencies = [
 "heck",
 "heck 0.4.1",
 "proc-macro-error",
 "proc-macro2",
 "quote",
@@ -1460,7 +1469,7 @@ version = "4.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050"
dependencies = [
 "heck",
 "heck 0.4.1",
 "proc-macro2",
 "quote",
 "syn 2.0.28",
@@ -1659,6 +1668,7 @@ dependencies = [
 "paste",
 "regex",
 "snafu",
 "strum 0.21.0",
 "tokio",
 "tokio-util",
 "url",
@@ -1722,10 +1732,10 @@ dependencies = [
 "common-test-util",
 "common-version",
 "hyper",
 "once_cell",
 "reqwest",
 "serde",
 "serde_json",
 "tempfile",
 "tokio",
 "uuid",
]
@@ -1808,7 +1818,6 @@ dependencies = [
 "lazy_static",
 "prost",
 "regex",
 "reqwest",
 "serde",
 "serde_json",
 "snafu",
@@ -1817,17 +1826,6 @@ dependencies = [
 "tokio",
]

[[package]]
name = "common-pprof"
version = "0.3.2"
dependencies = [
 "common-error",
 "pprof",
 "prost",
 "snafu",
 "tokio",
]

[[package]]
name = "common-procedure"
version = "0.3.2"
@@ -2452,7 +2450,7 @@ dependencies = [
 "pin-project-lite",
 "rand",
 "smallvec",
 "sqlparser",
 "sqlparser 0.35.0",
 "tempfile",
 "tokio",
 "tokio-util",
@@ -2473,7 +2471,7 @@ dependencies = [
 "num_cpus",
 "object_store",
 "parquet",
 "sqlparser",
 "sqlparser 0.35.0",
]

[[package]]
@@ -2502,7 +2500,7 @@ dependencies = [
 "arrow",
 "datafusion-common",
 "lazy_static",
 "sqlparser",
 "sqlparser 0.35.0",
 "strum 0.25.0",
 "strum_macros 0.25.1",
]
@@ -2580,7 +2578,7 @@ dependencies = [
 "datafusion-common",
 "datafusion-expr",
 "log",
 "sqlparser",
 "sqlparser 0.35.0",
]

[[package]]
@@ -3218,21 +3216,6 @@ version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"

[[package]]
name = "foreign-types"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
dependencies = [
 "foreign-types-shared",
]

[[package]]
name = "foreign-types-shared"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"

[[package]]
name = "form_urlencoded"
version = "1.2.0"
@@ -3307,6 +3290,7 @@ dependencies = [
 "session",
 "snafu",
 "sql",
 "sqlparser 0.34.0",
 "storage",
 "store-api",
 "strfmt",
@@ -4130,7 +4114,19 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=eeae2d0dfa8ee320a7b9e987b4631a6c1c732ebd#eeae2d0dfa8ee320a7b9e987b4631a6c1c732ebd"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=10c349c033dded29097d0dc933fbc2f89f658032#10c349c033dded29097d0dc933fbc2f89f658032"
dependencies = [
 "prost",
 "serde",
 "serde_json",
 "tonic 0.9.2",
 "tonic-build",
]

[[package]]
name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=940694cfd05c1e93c1dd7aab486184c9e2853098#940694cfd05c1e93c1dd7aab486184c9e2853098"
dependencies = [
 "prost",
 "serde",
@@ -4250,6 +4246,15 @@ dependencies = [
 "http",
]

[[package]]
name = "heck"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c"
dependencies = [
 "unicode-segmentation",
]

[[package]]
name = "heck"
version = "0.4.1"
@@ -4430,19 +4435,6 @@ dependencies = [
 "tokio-io-timeout",
]

[[package]]
name = "hyper-tls"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905"
dependencies = [
 "bytes",
 "hyper",
 "native-tls",
 "tokio",
 "tokio-native-tls",
]

[[package]]
name = "iana-time-zone"
version = "0.1.57"
@@ -4959,9 +4951,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"

[[package]]
name = "linux-raw-sys"
version = "0.4.3"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0"
checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503"

[[package]]
name = "lock_api"
@@ -5312,6 +5304,7 @@ dependencies = [
 "serde",
 "serde_json",
 "servers",
 "session",
 "snafu",
 "store-api",
 "table",
@@ -5526,17 +5519,20 @@ dependencies = [
 "datafusion-common",
 "datatypes",
 "futures",
 "greptime-proto 0.1.0 (git+https://github.com/GreptimeTeam/greptime-proto.git?rev=10c349c033dded29097d0dc933fbc2f89f658032)",
 "lazy_static",
 "log-store",
 "metrics",
 "object-store",
 "parquet",
 "prost",
 "regex",
 "serde",
 "serde_json",
 "snafu",
 "storage",
 "store-api",
 "strum 0.21.0",
 "table",
 "tokio",
 "uuid",
@@ -5615,7 +5611,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56b0d8a0db9bf6d2213e11f2c701cb91387b0614361625ab7b9743b41aa4938f"
dependencies = [
 "darling 0.20.3",
 "heck",
 "heck 0.4.1",
 "num-bigint",
 "proc-macro-crate 1.3.1",
 "proc-macro-error",
@@ -5729,24 +5725,6 @@ dependencies = [
 "syn 1.0.109",
]

[[package]]
name = "native-tls"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e"
dependencies = [
 "lazy_static",
 "libc",
 "log",
 "openssl",
 "openssl-probe",
 "openssl-sys",
 "schannel",
 "security-framework",
 "security-framework-sys",
 "tempfile",
]

[[package]]
name = "new_debug_unreachable"
version = "1.0.4"
@@ -5876,13 +5854,13 @@ dependencies = [

[[package]]
name = "num-derive"
version = "0.3.3"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d"
checksum = "9e6a0fd4f737c707bd9086cc16c925f294943eb62eb71499e9fd4cf71f8b9f4e"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 1.0.109",
 "syn 2.0.28",
]

[[package]]
@@ -6107,50 +6085,12 @@ dependencies = [
 "tokio-rustls 0.24.1",
]

[[package]]
name = "openssl"
version = "0.10.55"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d"
dependencies = [
 "bitflags 1.3.2",
 "cfg-if 1.0.0",
 "foreign-types",
 "libc",
 "once_cell",
 "openssl-macros",
 "openssl-sys",
]
|
||||
[[package]]
|
||||
name = "openssl-macros"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.28",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "openssl-probe"
|
||||
version = "0.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
|
||||
|
||||
[[package]]
|
||||
name = "openssl-sys"
|
||||
version = "0.9.90"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
"pkg-config",
|
||||
"vcpkg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry"
|
||||
version = "0.17.0"
|
||||
@@ -6614,9 +6554,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "pgwire"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e2de42ee35f9694def25c37c15f564555411d9904b48e33680618ee7359080dc"
|
||||
checksum = "593c5af58c6394873b84c6fabf31f97e49ab29a56809e7fd240c1bcc4e5d272f"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"base64 0.21.2",
|
||||
@@ -7066,7 +7006,7 @@ dependencies = [
|
||||
"datafusion",
|
||||
"datatypes",
|
||||
"futures",
|
||||
"greptime-proto",
|
||||
"greptime-proto 0.1.0 (git+https://github.com/GreptimeTeam/greptime-proto.git?rev=940694cfd05c1e93c1dd7aab486184c9e2853098)",
|
||||
"promql-parser",
|
||||
"prost",
|
||||
"query",
|
||||
@@ -7106,7 +7046,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"heck",
|
||||
"heck 0.4.1",
|
||||
"itertools 0.10.5",
|
||||
"lazy_static",
|
||||
"log",
|
||||
@@ -7154,13 +7094,14 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "protobuf-build"
|
||||
version = "0.14.1"
|
||||
version = "0.15.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2df9942df2981178a930a72d442de47e2f0df18ad68e50a30f816f1848215ad0"
|
||||
checksum = "c852d9625b912c3e50480cdc701f60f49890b5d7ad46198dd583600f15e7c6ec"
|
||||
dependencies = [
|
||||
"bitflags 1.3.2",
|
||||
"protobuf",
|
||||
"protobuf-codegen",
|
||||
"protobuf-src",
|
||||
"regex",
|
||||
]
|
||||
|
||||
@@ -7173,6 +7114,15 @@ dependencies = [
|
||||
"protobuf",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "protobuf-src"
|
||||
version = "1.1.0+21.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c7ac8852baeb3cc6fb83b93646fb93c0ffe5d14bf138c945ceb4b9948ee0e3c1"
|
||||
dependencies = [
|
||||
"autotools",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ptr_meta"
|
||||
version = "0.1.4"
|
||||
@@ -7309,7 +7259,9 @@ dependencies = [
|
||||
"ahash 0.8.3",
|
||||
"approx_eq",
|
||||
"arc-swap",
|
||||
"arrow",
|
||||
"arrow-schema",
|
||||
"async-recursion",
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
"catalog",
|
||||
@@ -7336,7 +7288,7 @@ dependencies = [
|
||||
"format_num",
|
||||
"futures",
|
||||
"futures-util",
|
||||
"greptime-proto",
|
||||
"greptime-proto 0.1.0 (git+https://github.com/GreptimeTeam/greptime-proto.git?rev=940694cfd05c1e93c1dd7aab486184c9e2853098)",
|
||||
"humantime",
|
||||
"metrics",
|
||||
"num",
|
||||
@@ -7421,22 +7373,21 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "raft-engine"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "67b66e735395b7ff12f3ebbb4794006aecb365c4c9a82141279b58b227ac3a8b"
|
||||
source = "git+https://github.com/tikv/raft-engine.git?rev=2dcaf5beeea3d5de9ec9c7133a2451d00f508f52#2dcaf5beeea3d5de9ec9c7133a2451d00f508f52"
|
||||
dependencies = [
|
||||
"byteorder",
|
||||
"crc32fast",
|
||||
"crossbeam",
|
||||
"fail",
|
||||
"fs2",
|
||||
"hashbrown 0.12.3",
|
||||
"hashbrown 0.14.0",
|
||||
"hex",
|
||||
"if_chain",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"log",
|
||||
"lz4-sys",
|
||||
"nix 0.25.1",
|
||||
"nix 0.26.2",
|
||||
"num-derive",
|
||||
"num-traits",
|
||||
"parking_lot 0.12.1",
|
||||
@@ -7444,10 +7395,11 @@ dependencies = [
|
||||
"prometheus-static-metric",
|
||||
"protobuf",
|
||||
"rayon",
|
||||
"rhai",
|
||||
"scopeguard",
|
||||
"serde",
|
||||
"serde_repr",
|
||||
"strum 0.24.1",
|
||||
"strum 0.25.0",
|
||||
"thiserror",
|
||||
]
|
||||
|
||||
@@ -7679,13 +7631,11 @@ dependencies = [
|
||||
"http-body",
|
||||
"hyper",
|
||||
"hyper-rustls",
|
||||
"hyper-tls",
|
||||
"ipnet",
|
||||
"js-sys",
|
||||
"log",
|
||||
"mime",
|
||||
"mime_guess",
|
||||
"native-tls",
|
||||
"once_cell",
|
||||
"percent-encoding",
|
||||
"pin-project-lite",
|
||||
@@ -7696,7 +7646,6 @@ dependencies = [
|
||||
"serde_json",
|
||||
"serde_urlencoded",
|
||||
"tokio",
|
||||
"tokio-native-tls",
|
||||
"tokio-rustls 0.24.1",
|
||||
"tokio-util",
|
||||
"tower-service",
|
||||
@@ -7705,6 +7654,7 @@ dependencies = [
|
||||
"wasm-bindgen-futures",
|
||||
"wasm-streams",
|
||||
"web-sys",
|
||||
"webpki-roots 0.22.6",
|
||||
"winreg",
|
||||
]
|
||||
|
||||
@@ -7752,6 +7702,32 @@ dependencies = [
|
||||
"bytemuck",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rhai"
|
||||
version = "1.15.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4c2a11a646ef5d4e4a9d5cf80c7e4ecb20f9b1954292d5c5e6d6cbc8d33728ec"
|
||||
dependencies = [
|
||||
"ahash 0.8.3",
|
||||
"bitflags 1.3.2",
|
||||
"instant",
|
||||
"num-traits",
|
||||
"rhai_codegen",
|
||||
"smallvec",
|
||||
"smartstring",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rhai_codegen"
|
||||
version = "1.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "db74e3fdd29d969a0ec1f8e79171a6f0f71d0429293656901db382d248c4c021"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 1.0.109",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ring"
|
||||
version = "0.16.20"
|
||||
@@ -8039,7 +8015,7 @@ dependencies = [
|
||||
"bitflags 2.3.3",
|
||||
"errno 0.3.2",
|
||||
"libc",
|
||||
"linux-raw-sys 0.4.3",
|
||||
"linux-raw-sys 0.4.5",
|
||||
"windows-sys 0.48.0",
|
||||
]
|
||||
|
||||
@@ -8712,9 +8688,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4"
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.179"
|
||||
version = "1.0.180"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0a5bf42b8d227d4abf38a1ddb08602e229108a517cd4e5bb28f9c7eaafdce5c0"
|
||||
checksum = "0ea67f183f058fe88a4e3ec6e2788e003840893b91bac4559cabedd00863b3ed"
|
||||
dependencies = [
|
||||
"serde_derive",
|
||||
]
|
||||
@@ -8731,9 +8707,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "serde_derive"
|
||||
version = "1.0.179"
|
||||
version = "1.0.180"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "741e124f5485c7e60c03b043f79f320bff3527f4bbf12cf3831750dc46a0ec2c"
|
||||
checksum = "24e744d7782b686ab3b73267ef05697159cc0e5abbed3f47f9933165e5219036"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -8863,7 +8839,6 @@ dependencies = [
|
||||
"common-grpc",
|
||||
"common-grpc-expr",
|
||||
"common-mem-prof",
|
||||
"common-pprof",
|
||||
"common-query",
|
||||
"common-recordbatch",
|
||||
"common-runtime",
|
||||
@@ -8898,6 +8873,7 @@ dependencies = [
|
||||
"pgwire",
|
||||
"pin-project",
|
||||
"postgres-types",
|
||||
"pprof",
|
||||
"promql-parser",
|
||||
"prost",
|
||||
"query",
|
||||
@@ -9109,6 +9085,17 @@ version = "1.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"
|
||||
|
||||
[[package]]
|
||||
name = "smartstring"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3fb72c633efbaa2dd666986505016c32c3044395ceaf881518399d2f4127ee29"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"static_assertions",
|
||||
"version_check",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "snafu"
|
||||
version = "0.7.5"
|
||||
@@ -9126,7 +9113,7 @@ version = "0.7.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "990079665f075b699031e9c08fd3ab99be5029b96f3b78dc0709e8f77e4efebf"
|
||||
dependencies = [
|
||||
"heck",
|
||||
"heck 0.4.1",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 1.0.109",
|
||||
@@ -9213,7 +9200,7 @@ dependencies = [
|
||||
"itertools 0.10.5",
|
||||
"once_cell",
|
||||
"snafu",
|
||||
"sqlparser",
|
||||
"sqlparser 0.34.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -9259,6 +9246,18 @@ dependencies = [
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sqlparser"
|
||||
version = "0.34.0"
|
||||
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=c3814f08afa19786b13d72b1731a1e8b3cac4ab9#c3814f08afa19786b13d72b1731a1e8b3cac4ab9"
|
||||
dependencies = [
|
||||
"lazy_static",
|
||||
"log",
|
||||
"regex",
|
||||
"sqlparser 0.35.0",
|
||||
"sqlparser_derive 0.1.1 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=c3814f08afa19786b13d72b1731a1e8b3cac4ab9)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sqlparser"
|
||||
version = "0.35.0"
|
||||
@@ -9266,7 +9265,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ca597d77c98894be1f965f2e4e2d2a61575d4998088e655476c73715c54b2b43"
|
||||
dependencies = [
|
||||
"log",
|
||||
"sqlparser_derive",
|
||||
"sqlparser_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -9280,6 +9279,16 @@ dependencies = [
|
||||
"syn 1.0.109",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sqlparser_derive"
|
||||
version = "0.1.1"
|
||||
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=c3814f08afa19786b13d72b1731a1e8b3cac4ab9#c3814f08afa19786b13d72b1731a1e8b3cac4ab9"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 1.0.109",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sqlx"
|
||||
version = "0.6.3"
|
||||
@@ -9356,7 +9365,7 @@ checksum = "9966e64ae989e7e575b19d7265cb79d7fc3cbbdf179835cb0d716f294c2049c9"
|
||||
dependencies = [
|
||||
"dotenvy",
|
||||
"either",
|
||||
"heck",
|
||||
"heck 0.4.1",
|
||||
"once_cell",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -9558,6 +9567,15 @@ version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
|
||||
|
||||
[[package]]
|
||||
name = "strum"
|
||||
version = "0.21.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "aaf86bbcfd1fa9670b7a129f64fc0c9fcbbfe4f1bc4210e9e98fe71ffc12cde2"
|
||||
dependencies = [
|
||||
"strum_macros 0.21.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "strum"
|
||||
version = "0.24.1"
|
||||
@@ -9576,13 +9594,25 @@ dependencies = [
|
||||
"strum_macros 0.25.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "strum_macros"
|
||||
version = "0.21.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d06aaeeee809dbc59eb4556183dd927df67db1540de5be8d3ec0b6636358a5ec"
|
||||
dependencies = [
|
||||
"heck 0.3.3",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 1.0.109",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "strum_macros"
|
||||
version = "0.24.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59"
|
||||
dependencies = [
|
||||
"heck",
|
||||
"heck 0.4.1",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"rustversion",
|
||||
@@ -9595,7 +9625,7 @@ version = "0.25.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6069ca09d878a33f883cc06aaa9718ede171841d3832450354410b718b097232"
|
||||
dependencies = [
|
||||
"heck",
|
||||
"heck 0.4.1",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"rustversion",
|
||||
@@ -9645,7 +9675,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e3ae64fb7ad0670c7d6d53d57b1b91beb2212afc30e164cc8edb02d6b2cff32a"
|
||||
dependencies = [
|
||||
"gix",
|
||||
"heck",
|
||||
"heck 0.4.1",
|
||||
"prettyplease 0.2.12",
|
||||
"prost",
|
||||
"prost-build",
|
||||
@@ -9667,7 +9697,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2ac1ce8315086b127ca0abf162c62279550942bb26ebf7946fe17fe114446472"
|
||||
dependencies = [
|
||||
"git2",
|
||||
"heck",
|
||||
"heck 0.4.1",
|
||||
"prettyplease 0.2.12",
|
||||
"prost",
|
||||
"prost-build",
|
||||
@@ -9842,9 +9872,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
|
||||
|
||||
[[package]]
|
||||
name = "target-lexicon"
|
||||
version = "0.12.10"
|
||||
version = "0.12.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1d2faeef5759ab89935255b1a4cd98e0baf99d1085e37d36599c625dac49ae8e"
|
||||
checksum = "9d0e916b1148c8e263850e1ebcbd046f333e0683c724876bb0da63ea4373dc8a"
|
||||
|
||||
[[package]]
|
||||
name = "temp-env"
|
||||
@@ -9953,6 +9983,7 @@ dependencies = [
|
||||
"table",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
"tokio-postgres",
|
||||
"tonic 0.9.2",
|
||||
"tower",
|
||||
"uuid",
|
||||
@@ -10206,16 +10237,6 @@ dependencies = [
|
||||
"syn 2.0.28",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-native-tls"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2"
|
||||
dependencies = [
|
||||
"native-tls",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-postgres"
|
||||
version = "0.7.8"
|
||||
@@ -10715,7 +10736,7 @@ version = "0.0.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "95d27d749378ceab6ec22188ed7ad102205c89ddb92ab662371c850ffc71aa1a"
|
||||
dependencies = [
|
||||
"heck",
|
||||
"heck 0.4.1",
|
||||
"log",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -10733,7 +10754,7 @@ version = "0.0.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5c8d9ecedde2fd77e975c38eeb9ca40b34ad0247b2259c6e6bbd2a8d6cc2444f"
|
||||
dependencies = [
|
||||
"heck",
|
||||
"heck 0.4.1",
|
||||
"log",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -11568,9 +11589,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "x509-certificate"
|
||||
version = "0.20.0"
|
||||
version = "0.21.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2133ce6c08c050a5b368730a67c53a603ffd4a4a6c577c5218675a19f7782c05"
|
||||
checksum = "5e5d27c90840e84503cf44364de338794d5d5680bdd1da6272d13f80b0769ee0"
|
||||
dependencies = [
|
||||
"bcder",
|
||||
"bytes",
|
||||
|
||||
Cargo.toml (58 changed lines)
@@ -11,13 +11,13 @@ members = [
|
||||
"src/common/error",
|
||||
"src/common/function",
|
||||
"src/common/function-macro",
|
||||
"src/common/greptimedb-telemetry",
|
||||
"src/common/grpc",
|
||||
"src/common/grpc-expr",
|
||||
"src/common/mem-prof",
|
||||
"src/common/meta",
|
||||
"src/common/procedure",
|
||||
"src/common/procedure-test",
|
||||
"src/common/pprof",
|
||||
"src/common/query",
|
||||
"src/common/recordbatch",
|
||||
"src/common/runtime",
|
||||
@@ -25,6 +25,7 @@ members = [
|
||||
"src/common/telemetry",
|
||||
"src/common/test-util",
|
||||
"src/common/time",
|
||||
"src/common/version",
|
||||
"src/datanode",
|
||||
"src/datatypes",
|
||||
"src/file-table-engine",
|
||||
@@ -49,6 +50,7 @@ members = [
|
||||
"tests-integration",
|
||||
"tests/runner",
|
||||
]
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.3.2"
|
||||
@@ -74,7 +76,7 @@ datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git
|
||||
derive_builder = "0.12"
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "eeae2d0dfa8ee320a7b9e987b4631a6c1c732ebd" }
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "940694cfd05c1e93c1dd7aab486184c9e2853098" }
|
||||
itertools = "0.10"
|
||||
lazy_static = "1.4"
|
||||
once_cell = "1.18"
|
||||
@@ -87,7 +89,9 @@ regex = "1.8"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
sqlparser = "0.35"
|
||||
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "c3814f08afa19786b13d72b1731a1e8b3cac4ab9", features = [
|
||||
"visitor",
|
||||
] }
|
||||
tempfile = "3"
|
||||
tokio = { version = "1.28", features = ["full"] }
|
||||
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
|
||||
@@ -96,6 +100,54 @@ tonic = { version = "0.9", features = ["tls"] }
|
||||
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
|
||||
metrics = "0.20"
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
|
||||
## workspaces members
|
||||
api = { path = "src/api" }
|
||||
catalog = { path = "src/catalog" }
|
||||
client = { path = "src/client" }
|
||||
cmd = { path = "src/cmd" }
|
||||
common-base = { path = "src/common/base" }
|
||||
common-catalog = { path = "src/common/catalog" }
|
||||
common-datasource = { path = "src/common/datasource" }
|
||||
common-error = { path = "src/common/error" }
|
||||
common-function = { path = "src/common/function" }
|
||||
common-function-macro = { path = "src/common/function-macro" }
|
||||
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
|
||||
common-grpc = { path = "src/common/grpc" }
|
||||
common-grpc-expr = { path = "src/common/grpc-expr" }
|
||||
common-mem-prof = { path = "src/common/mem-prof" }
|
||||
common-meta = { path = "src/common/meta" }
|
||||
common-procedure = { path = "src/common/procedure" }
|
||||
common-procedure-test = { path = "src/common/procedure-test" }
|
||||
common-pprof = { path = "src/common/pprof" }
|
||||
common-query = { path = "src/common/query" }
|
||||
common-recordbatch = { path = "src/common/recordbatch" }
|
||||
common-runtime = { path = "src/common/runtime" }
|
||||
substrait = { path = "src/common/substrait" }
|
||||
common-telemetry = { path = "src/common/telemetry" }
|
||||
common-test-util = { path = "src/common/test-util" }
|
||||
common-time = { path = "src/common/time" }
|
||||
common-version = { path = "src/common/version" }
|
||||
datanode = { path = "src/datanode" }
|
||||
datatypes = { path = "src/datatypes" }
|
||||
file-table-engine = { path = "src/file-table-engine" }
|
||||
frontend = { path = "src/frontend" }
|
||||
log-store = { path = "src/log-store" }
|
||||
meta-client = { path = "src/meta-client" }
|
||||
meta-srv = { path = "src/meta-srv" }
|
||||
mito = { path = "src/mito" }
|
||||
mito2 = { path = "src/mito2" }
|
||||
object-store = { path = "src/object-store" }
|
||||
partition = { path = "src/partition" }
|
||||
promql = { path = "src/promql" }
|
||||
query = { path = "src/query" }
|
||||
script = { path = "src/script" }
|
||||
servers = { path = "src/servers" }
|
||||
session = { path = "src/session" }
|
||||
sql = { path = "src/sql" }
|
||||
storage = { path = "src/storage" }
|
||||
store-api = { path = "src/store-api" }
|
||||
table = { path = "src/table" }
|
||||
table-procedure = { path = "src/table-procedure" }
|
||||
|
||||
[workspace.dependencies.meter-macros]
|
||||
git = "https://github.com/GreptimeTeam/greptime-meter.git"
|
||||
|
||||
Makefile (6 changed lines)
@@ -83,11 +83,11 @@ fmt: ## Format all the Rust code.

.PHONY: fmt-toml
fmt-toml: ## Format all TOML files.
taplo format --option "indent_string= "
taplo format

.PHONY: check-toml
check-toml: ## Check all TOML files.
taplo format --check --option "indent_string= "
taplo format --check

.PHONY: docker-image
docker-image: multi-platform-buildx ## Build docker image.
@@ -137,7 +137,7 @@ check: ## Cargo check all the targets.

.PHONY: clippy
clippy: ## Check clippy rules.
cargo clippy --workspace --all-targets -- -D warnings
cargo clippy --workspace --all-targets -F pyo3_backend -- -D warnings

.PHONY: fmt-check
fmt-check: ## Check code format.

@@ -7,7 +7,7 @@ license.workspace = true
[dependencies]
arrow.workspace = true
clap = { version = "4.0", features = ["derive"] }
client = { path = "../src/client" }
client = { workspace = true }
indicatif = "0.17.1"
itertools.workspace = true
parquet.workspace = true

@@ -38,8 +38,9 @@ sync_write = false

# Storage options, see `standalone.example.toml`.
[storage]
type = "File"
# The working home directory.
data_home = "/tmp/greptimedb/"
type = "File"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"

@@ -1,3 +1,5 @@
# The working home directory.
data_home = "/tmp/metasrv/"
# The bind address of metasrv, "127.0.0.1:3002" by default.
bind_addr = "127.0.0.1:3002"
# The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
@@ -13,6 +15,8 @@ datanode_lease_secs = 15
selector = "LeaseBased"
# Store data in memory, false by default.
use_memory_store = false
# Whether to enable greptimedb telemetry, true by default.
enable_telemetry = true

# Log options, see `standalone.example.toml`
# [logging]

@@ -2,6 +2,8 @@
mode = "standalone"
# Whether to use in-memory catalog, `false` by default.
enable_memory_catalog = false
# Whether to enable greptimedb telemetry, true by default.
enable_telemetry = true

# HTTP server options.
[http_options]
@@ -96,10 +98,10 @@ sync_write = false

# Storage options.
[storage]
# The working home directory.
data_home = "/tmp/greptimedb/"
# Storage type.
type = "File"
# Data directory, "/tmp/greptimedb/data" by default.
data_home = "/tmp/greptimedb/"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"

docs/rfcs/2023-08-04-tabie-trait-refactor.md (new file, 175 lines)
@@ -0,0 +1,175 @@
---
Feature Name: table-trait-refactor
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/2065
Date: 2023-08-04
Author: "Ruihang Xia <waynestxia@gmail.com>"
---

Refactor Table Trait
--------------------

# Summary
Refactor the `Table` trait to adapt to the new region server architecture and make the code more straightforward.

# Motivation
The `Table` trait was designed when the frontend and the datanode shared the same concepts and every operation was served through a `Table`. In practice we found that not all operations fit there. For example, a `Table` doesn't hold any physical data itself, so operations like write or alter are simply a proxy over the underlying regions. Moreover, the recent datanode refactor ([rfc table-engine-refactor](./2023-07-06-table-engine-refactor.md)) turns the datanode into a region server that is only aware of `Region`-level concepts. This also calls for a refactor of the `Table` trait.

# Details

## Definitions

The current `Table` trait contains the following methods:
```rust
pub trait Table {
    /// Get a reference to the schema for this table
    fn schema(&self) -> SchemaRef;

    /// Get a reference to the table info.
    fn table_info(&self) -> TableInfoRef;

    /// Get the type of this table for metadata/catalog purposes.
    fn table_type(&self) -> TableType;

    /// Insert values into table.
    ///
    /// Returns number of inserted rows.
    async fn insert(&self, _request: InsertRequest) -> Result<usize>;

    /// Generate a record batch stream for querying.
    async fn scan_to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream>;

    /// Tests whether the table provider can make use of any or all filter expressions
    /// to optimise data retrieval.
    fn supports_filters_pushdown(&self, filters: &[&Expr]) -> Result<Vec<FilterPushDownType>>;

    /// Alter table.
    async fn alter(&self, _context: AlterContext, _request: &AlterTableRequest) -> Result<()>;

    /// Delete rows in the table.
    ///
    /// Returns number of deleted rows.
    async fn delete(&self, _request: DeleteRequest) -> Result<usize>;

    /// Flush table.
    ///
    /// Options:
    /// - region_number: specify region to flush.
    /// - wait: Whether to wait until flush is done.
    async fn flush(&self, region_number: Option<RegionNumber>, wait: Option<bool>) -> Result<()>;

    /// Close the table.
    async fn close(&self, _regions: &[RegionNumber]) -> Result<()>;

    /// Get region stats in this table.
    fn region_stats(&self) -> Result<Vec<RegionStat>>;

    /// Return true if contains the region
    fn contains_region(&self, _region: RegionNumber) -> Result<bool>;

    /// Get statistics for this table, if available
    fn statistics(&self) -> Option<TableStatistics>;

    async fn compact(&self, region_number: Option<RegionNumber>, wait: Option<bool>) -> Result<()>;
}
```

We can divide those methods into three categories from the perspective of functionality:

| Retrieve Metadata | Manipulate Data | Read Data |
| :------------------------: | :-------------: | :--------------: |
| `schema` | `insert` | `scan_to_stream` |
| `table_info` | `alter` | |
| `table_type` | `delete` | |
| `supports_filters_pushdown` | `flush` | |
| `region_stats` | `close` | |
| `contains_region` | `compact` | |
| `statistics` | | |

Considering that most metadata access happens in the frontend (for routing and query planning), that all persisted data lives in regions, and that only the query engine needs to read data, we can split the `Table` trait into three concepts:

- struct `Table` provides metadata:
  ```rust
  impl Table {
      /// Get a reference to the schema for this table
      fn schema(&self) -> SchemaRef;

      /// Get a reference to the table info.
      fn table_info(&self) -> TableInfoRef;

      /// Get the type of this table for metadata/catalog purposes.
      fn table_type(&self) -> TableType;

      /// Get statistics for this table, if available
      fn statistics(&self) -> Option<TableStatistics>;

      fn to_data_source(&self) -> DataSourceRef;
  }
  ```
- Requests to region server
  - `InsertRequest`
  - `AlterRequest`
  - `DeleteRequest`
  - `FlushRequest`
  - `CompactRequest`
  - `CloseRequest`

- trait `DataSource` provides data (`RecordBatch`); a standalone sketch of an implementation follows this list:
  ```rust
  trait DataSource {
      fn get_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream>;
  }
  ```

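To make the split concrete, below is a minimal, self-contained sketch of the `DataSource` idea. It is not greptimedb's actual API: `ScanRequest` and the returned batch type are simplified stand-ins (the real trait takes the full `ScanRequest` and returns a `SendableRecordBatchStream`), and `NumbersSource` only mimics the `Numbers` test table mentioned later in this RFC.

```rust
use std::sync::Arc;

/// Simplified stand-in for greptimedb's `ScanRequest` (projection only).
struct ScanRequest {
    projection: Option<Vec<usize>>,
}

/// Simplified stand-in for a record batch stream: plain rows of i64 columns.
type Batch = Vec<Vec<i64>>;

trait DataSource {
    fn get_stream(&self, request: ScanRequest) -> Batch;
}

type DataSourceRef = Arc<dyn DataSource>;

/// A `Numbers`-style temporary source: rows are generated on the fly,
/// with no persistent region behind them.
struct NumbersSource;

impl DataSource for NumbersSource {
    fn get_stream(&self, request: ScanRequest) -> Batch {
        // Two columns: n and n squared, for 0..100.
        let full: Batch = (0..100).map(|n| vec![n, n * n]).collect();
        match request.projection {
            // Keep only the projected columns, mirroring how a real source
            // would prune columns before producing the stream.
            Some(cols) => full
                .into_iter()
                .map(|row| cols.iter().map(|&i| row[i]).collect())
                .collect(),
            None => full,
        }
    }
}

fn main() {
    let source: DataSourceRef = Arc::new(NumbersSource);
    let rows = source.get_stream(ScanRequest { projection: Some(vec![0]) });
    assert_eq!(rows.len(), 100);
    assert_eq!(rows[0], vec![0]);
    println!("scanned {} rows", rows.len());
}
```
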
## Use `Table`

`Table` will only be used in the frontend. It is constructed from an `OpenTableRequest` or a `CreateTableRequest`.

`Table` also provides a `to_data_source` method to generate a `DataSource` from itself. This method only applies to non-`TableType::Base` tables (i.e., `TableType::View` and `TableType::Temporary`), because a `TableType::Base` table doesn't hold actual data itself; its `DataSource` should be constructed from the `Region` directly (in other words, it is a remote query).

Constructing a `DataSource` requires some extra information, captured in `TableSourceProvider` (a sketch of the dispatch follows the enum):

```rust
type TableFactory = Arc<dyn Fn() -> DataSourceRef>;

pub enum TableSourceProvider {
    Base,
    View(LogicalPlan),
    Temporary(TableFactory),
}
```

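The RFC does not spell out how `to_data_source` consumes `TableSourceProvider`, so the following is only a hypothetical sketch of that dispatch; every type here (`DataSource`, `DataSourceRef`, `LogicalPlan`, the factory) is a simplified stand-in rather than the real greptimedb or DataFusion definition.

```rust
use std::sync::Arc;

// Simplified stand-ins so the sketch compiles on its own.
trait DataSource {
    fn describe(&self) -> String;
}
type DataSourceRef = Arc<dyn DataSource>;
type TableFactory = Arc<dyn Fn() -> DataSourceRef>;

/// Stand-in for a DataFusion logical plan.
#[derive(Clone)]
struct LogicalPlan(String);

enum TableSourceProvider {
    Base,
    View(LogicalPlan),
    Temporary(TableFactory),
}

/// A view's data source is backed by its logical plan.
struct ViewSource {
    plan: LogicalPlan,
}

impl DataSource for ViewSource {
    fn describe(&self) -> String {
        format!("view over plan: {}", self.plan.0)
    }
}

struct Table {
    provider: TableSourceProvider,
}

impl Table {
    /// Hypothetical dispatch: base tables have no local data source (their data
    /// lives in regions and is scanned remotely), views wrap their logical plan,
    /// temporary tables delegate to the stored factory.
    fn to_data_source(&self) -> Option<DataSourceRef> {
        match &self.provider {
            TableSourceProvider::Base => None,
            TableSourceProvider::View(plan) => Some(Arc::new(ViewSource { plan: plan.clone() })),
            TableSourceProvider::Temporary(factory) => Some((factory.as_ref())()),
        }
    }
}

fn main() {
    let base = Table { provider: TableSourceProvider::Base };
    assert!(base.to_data_source().is_none());

    let view = Table {
        provider: TableSourceProvider::View(LogicalPlan("SELECT 1".to_string())),
    };
    println!("{}", view.to_data_source().unwrap().describe());

    let factory: TableFactory = Arc::new(|| {
        Arc::new(ViewSource { plan: LogicalPlan("generated".to_string()) }) as DataSourceRef
    });
    let temporary = Table { provider: TableSourceProvider::Temporary(factory) };
    println!("{}", temporary.to_data_source().unwrap().describe());
}
```

The `Temporary(TableFactory)` variant matters because the data source can be rebuilt on every call, which is what lets information-schema style tables always reflect the current state rather than a cached snapshot.
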
## Use `DataSource`

`DataSource` will be adapted to DataFusion's `TableProvider` so that it can be `scan()`ed in a `TableScan` plan.

In the frontend this adaptation happens in the planning phase, and the datanode will have one implementation for `Region` to generate the record batch stream.

## Interact with RegionServer

Previously, operations that change persisted state went through the old `Table` trait, as described above. Now they come from the action source (a procedure or a protocol handler) directly to the region server. For example, on an alter-table operation the corresponding procedure generates its `AlterRequest` and sends it to the regions, and a write request is split in the frontend handler and sent to the regions. `Table` only provides supporting metadata such as route information when needed; it is no longer a required part of the path.

## Implement temporary table

A temporary table is a special table that doesn't resolve to any persistent physical region. Examples are:
- the `Numbers` table for testing, which produces a record batch containing the integers 0-100.
- tables in the information schema, which are an interface for querying the catalog's metadata. The contents are generated on the fly from the `CatalogManager`, and the `CatalogManager` handle can be held in the `TableFactory` (see the sketch after this list).
- function tables that produce data from a formula or a function, e.g. something that always returns `sin(current_timestamp())`.

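The following is a small, self-contained sketch of the second example: a `TableFactory` closure that captures a weak handle to a catalog-manager-like struct and produces an information-schema style source on each call. The types are simplified stand-ins, not greptimedb's real `CatalogManager` or `DataSource`.

```rust
use std::sync::{Arc, Mutex, Weak};

/// Stand-in for greptimedb's CatalogManager: just a list of table names.
struct CatalogManager {
    tables: Mutex<Vec<String>>,
}

trait DataSource {
    fn rows(&self) -> Vec<String>;
}
type DataSourceRef = Arc<dyn DataSource>;
type TableFactory = Arc<dyn Fn() -> DataSourceRef>;

/// An information_schema-style source: contents are produced on the fly from
/// whatever the catalog currently holds, not read from any region.
struct TablesSource {
    catalog: Weak<CatalogManager>,
}

impl DataSource for TablesSource {
    fn rows(&self) -> Vec<String> {
        match self.catalog.upgrade() {
            Some(catalog) => catalog.tables.lock().unwrap().clone(),
            None => vec![], // the catalog has already been dropped
        }
    }
}

fn main() {
    let catalog = Arc::new(CatalogManager {
        tables: Mutex::new(vec!["monitor".to_string()]),
    });

    // The factory captures a Weak handle, so the temporary table neither keeps
    // the catalog alive nor caches stale contents.
    let weak = Arc::downgrade(&catalog);
    let factory: TableFactory =
        Arc::new(move || Arc::new(TablesSource { catalog: weak.clone() }) as DataSourceRef);

    catalog.tables.lock().unwrap().push("system_metrics".to_string());
    let source = (factory.as_ref())();
    assert_eq!(
        source.rows(),
        vec!["monitor".to_string(), "system_metrics".to_string()]
    );
}
```
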
## Relationship among those components

Here is a diagram to show the relationship among those components, and how they interact with each other.

```mermaid
erDiagram
    CatalogManager ||--|{ Table : manages
    Table ||--|{ DataStream : generates
    Table ||--|{ Region : routes
    Region ||--|{ DataStream : implements
    DataStream }|..|| QueryEngine : adapts-to
    Procedure ||--|{ Region : requests
    Protocol ||--|{ Region : writes
    Protocol ||--|{ QueryEngine : queries
```

# Drawback
This is a breaking change.
@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2023-05-03"
channel = "nightly-2023-08-07"

@@ -2,14 +2,14 @@

# This script is used to download built dashboard assets from the "GreptimeTeam/dashboard" repository.

set -e
set -e -x

declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd)
declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR})
declare -r STATIC_DIR="$ROOT_DIR/src/servers/dashboard"
OUT_DIR="${1:-$SCRIPT_DIR}"

RELEASE_VERSION="$(cat $STATIC_DIR/VERSION)"
RELEASE_VERSION="$(cat $STATIC_DIR/VERSION | tr -d '\t\r\n ')"

echo "Downloading assets to dir: $OUT_DIR"
cd $OUT_DIR

@@ -61,7 +61,16 @@ if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
  fi

  echo "Downloading ${BIN}, OS: ${OS_TYPE}, Arch: ${ARCH_TYPE}, Version: ${VERSION}"
  PACKAGE_NAME="${BIN}-${OS_TYPE}-${ARCH_TYPE}-${VERSION}.tar.gz"

  wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
  tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run './${BIN} --help' to get started"
  if [ -n "${PACKAGE_NAME}" ]; then
    wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${PACKAGE_NAME}"

    # Extract the binary and clean the rest.
    tar xvf "${PACKAGE_NAME}" && \
    mv "${PACKAGE_NAME%.tar.gz}/${BIN}" "${PWD}" && \
    rm -r "${PACKAGE_NAME}" && \
    rm -r "${PACKAGE_NAME%.tar.gz}" && \
    echo "Run './${BIN} --help' to get started"
  fi
fi

@@ -5,10 +5,10 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
common-base = { path = "../common/base" }
|
||||
common-error = { path = "../common/error" }
|
||||
common-time = { path = "../common/time" }
|
||||
datatypes = { path = "../datatypes" }
|
||||
common-base = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
datatypes = { workspace = true }
|
||||
greptime-proto.workspace = true
|
||||
prost.workspace = true
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
|
||||
@@ -13,15 +13,17 @@
|
||||
// limitations under the License.
|
||||
|
||||
use common_base::BitVec;
|
||||
use common_time::interval::IntervalUnit;
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use common_time::Interval;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::types::{TimeType, TimestampType};
|
||||
use datatypes::types::{IntervalType, TimeType, TimestampType};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::VectorRef;
|
||||
use greptime_proto::v1::ddl_request::Expr;
|
||||
use greptime_proto::v1::greptime_request::Request;
|
||||
use greptime_proto::v1::query_request::Query;
|
||||
use greptime_proto::v1::{DdlRequest, QueryRequest};
|
||||
use greptime_proto::v1::{DdlRequest, IntervalMonthDayNano, QueryRequest};
|
||||
use snafu::prelude::*;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
@@ -75,6 +77,11 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
||||
ColumnDataType::TimeMillisecond => ConcreteDataType::time_millisecond_datatype(),
|
||||
ColumnDataType::TimeMicrosecond => ConcreteDataType::time_microsecond_datatype(),
|
||||
ColumnDataType::TimeNanosecond => ConcreteDataType::time_nanosecond_datatype(),
|
||||
ColumnDataType::IntervalYearMonth => ConcreteDataType::interval_year_month_datatype(),
|
||||
ColumnDataType::IntervalDayTime => ConcreteDataType::interval_day_time_datatype(),
|
||||
ColumnDataType::IntervalMonthDayNano => {
|
||||
ConcreteDataType::interval_month_day_nano_datatype()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -111,8 +118,12 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
||||
TimeType::Microsecond(_) => ColumnDataType::TimeMicrosecond,
|
||||
TimeType::Nanosecond(_) => ColumnDataType::TimeNanosecond,
|
||||
},
|
||||
ConcreteDataType::Interval(_)
|
||||
| ConcreteDataType::Null(_)
|
||||
ConcreteDataType::Interval(i) => match i {
|
||||
IntervalType::YearMonth(_) => ColumnDataType::IntervalYearMonth,
|
||||
IntervalType::DayTime(_) => ColumnDataType::IntervalDayTime,
|
||||
IntervalType::MonthDayNano(_) => ColumnDataType::IntervalMonthDayNano,
|
||||
},
|
||||
ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::List(_)
|
||||
| ConcreteDataType::Dictionary(_) => {
|
||||
return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
|
||||
@@ -216,6 +227,18 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
|
||||
time_nanosecond_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::IntervalDayTime => Values {
|
||||
interval_day_time_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::IntervalYearMonth => Values {
|
||||
interval_year_month_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::IntervalMonthDayNano => Values {
|
||||
interval_month_day_nano_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -256,7 +279,14 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
|
||||
TimeUnit::Microsecond => values.time_microsecond_values.push(val.value()),
|
||||
TimeUnit::Nanosecond => values.time_nanosecond_values.push(val.value()),
|
||||
},
|
||||
Value::Interval(_) | Value::List(_) => unreachable!(),
|
||||
Value::Interval(val) => match val.unit() {
|
||||
IntervalUnit::YearMonth => values.interval_year_month_values.push(val.to_i32()),
|
||||
IntervalUnit::DayTime => values.interval_day_time_values.push(val.to_i64()),
|
||||
IntervalUnit::MonthDayNano => values
|
||||
.interval_month_day_nano_values
|
||||
.push(convert_i128_to_interval(val.to_i128())),
|
||||
},
|
||||
Value::List(_) => unreachable!(),
|
||||
});
|
||||
column.null_mask = null_mask.into_vec();
|
||||
}
|
||||
@@ -295,14 +325,26 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
|
||||
}
|
||||
}
|
||||
|
||||
/// Converts an i128 value to google protobuf type [IntervalMonthDayNano].
|
||||
pub fn convert_i128_to_interval(v: i128) -> IntervalMonthDayNano {
|
||||
let interval = Interval::from_i128(v);
|
||||
let (months, days, nanoseconds) = interval.to_month_day_nano();
|
||||
IntervalMonthDayNano {
|
||||
months,
|
||||
days,
|
||||
nanoseconds,
|
||||
}
|
||||
}
|
||||
|
||||
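// NOTE (illustrative, not part of the diff above): `convert_i128_to_interval` leans on
// `Interval::from_i128` to unpack a month-day-nano interval. Assuming the Arrow-style
// packing (months in the high 32 bits, days in the next 32, nanoseconds in the low 64,
// which matches the test further below where an input of 3000 yields nanoseconds = 3000),
// the packing side would look roughly like:
//
//     fn pack_month_day_nano(months: i32, days: i32, nanoseconds: i64) -> i128 {
//         (((months as u32 as u128) << 96)
//             | ((days as u32 as u128) << 64)
//             | (nanoseconds as u64 as u128)) as i128
//     }
//
//     assert_eq!(pack_month_day_nano(0, 0, 3000), 3000);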
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::vectors::{
|
||||
BooleanVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector,
|
||||
TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector,
|
||||
TimestampNanosecondVector, TimestampSecondVector,
|
||||
BooleanVector, IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector,
|
||||
TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector,
|
||||
TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector,
|
||||
TimestampSecondVector, Vector,
|
||||
};
|
||||
|
||||
use super::*;
|
||||
@@ -368,6 +410,14 @@ mod tests {
|
||||
let values = values_with_capacity(ColumnDataType::TimeMillisecond, 2);
|
||||
let values = values.time_millisecond_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = values_with_capacity(ColumnDataType::IntervalDayTime, 2);
|
||||
let values = values.interval_day_time_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = values_with_capacity(ColumnDataType::IntervalMonthDayNano, 2);
|
||||
let values = values.interval_month_day_nano_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -440,6 +490,18 @@ mod tests {
|
||||
ConcreteDataType::time_datatype(TimeUnit::Millisecond),
|
||||
ColumnDataTypeWrapper(ColumnDataType::TimeMillisecond).into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::DayTime),
|
||||
ColumnDataTypeWrapper(ColumnDataType::IntervalDayTime).into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::YearMonth),
|
||||
ColumnDataTypeWrapper(ColumnDataType::IntervalYearMonth).into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
|
||||
ColumnDataTypeWrapper(ColumnDataType::IntervalMonthDayNano).into()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -510,6 +572,24 @@ mod tests {
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::IntervalYearMonth),
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::YearMonth)
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::IntervalDayTime),
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::DayTime)
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::IntervalMonthDayNano),
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano)
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
|
||||
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
|
||||
assert!(result.is_err());
|
||||
@@ -609,6 +689,50 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_put_interval_values() {
|
||||
let mut column = Column {
|
||||
column_name: "test".to_string(),
|
||||
semantic_type: 0,
|
||||
values: Some(Values {
|
||||
..Default::default()
|
||||
}),
|
||||
null_mask: vec![],
|
||||
datatype: 0,
|
||||
};
|
||||
|
||||
let vector = Arc::new(IntervalYearMonthVector::from_vec(vec![1, 2, 3]));
|
||||
push_vals(&mut column, 3, vector);
|
||||
assert_eq!(
|
||||
vec![1, 2, 3],
|
||||
column.values.as_ref().unwrap().interval_year_month_values
|
||||
);
|
||||
|
||||
let vector = Arc::new(IntervalDayTimeVector::from_vec(vec![4, 5, 6]));
|
||||
push_vals(&mut column, 3, vector);
|
||||
assert_eq!(
|
||||
vec![4, 5, 6],
|
||||
column.values.as_ref().unwrap().interval_day_time_values
|
||||
);
|
||||
|
||||
let vector = Arc::new(IntervalMonthDayNanoVector::from_vec(vec![7, 8, 9]));
|
||||
let len = vector.len();
|
||||
push_vals(&mut column, 3, vector);
|
||||
(0..len).for_each(|i| {
|
||||
assert_eq!(
|
||||
7 + i as i64,
|
||||
column
|
||||
.values
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.interval_month_day_nano_values
|
||||
.get(i)
|
||||
.unwrap()
|
||||
.nanoseconds
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_put_vector() {
|
||||
use crate::v1::SemanticType;
|
||||
@@ -633,4 +757,13 @@ mod tests {
|
||||
let null_mask = column.null_mask;
|
||||
assert_eq!(34, null_mask[0]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convert_i128_to_interval() {
|
||||
let i128_val = 3000;
|
||||
let interval = convert_i128_to_interval(i128_val);
|
||||
assert_eq!(interval.months, 0);
|
||||
assert_eq!(interval.days, 0);
|
||||
assert_eq!(interval.nanoseconds, 3000);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,45 +8,45 @@ license.workspace = true
|
||||
testing = []
|
||||
|
||||
[dependencies]
|
||||
api = { path = "../api" }
|
||||
api = { workspace = true }
|
||||
arc-swap = "1.0"
|
||||
arrow-schema.workspace = true
|
||||
async-stream.workspace = true
|
||||
async-trait = "0.1"
|
||||
common-catalog = { path = "../common/catalog" }
|
||||
common-error = { path = "../common/error" }
|
||||
common-grpc = { path = "../common/grpc" }
|
||||
common-meta = { path = "../common/meta" }
|
||||
common-query = { path = "../common/query" }
|
||||
common-recordbatch = { path = "../common/recordbatch" }
|
||||
common-runtime = { path = "../common/runtime" }
|
||||
common-telemetry = { path = "../common/telemetry" }
|
||||
common-time = { path = "../common/time" }
|
||||
common-catalog = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-grpc = { workspace = true }
|
||||
common-meta = { workspace = true }
|
||||
common-query = { workspace = true }
|
||||
common-recordbatch = { workspace = true }
|
||||
common-runtime = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
dashmap = "5.4"
|
||||
datafusion.workspace = true
|
||||
datatypes = { path = "../datatypes" }
|
||||
datatypes = { workspace = true }
|
||||
futures = "0.3"
|
||||
futures-util.workspace = true
|
||||
lazy_static.workspace = true
|
||||
meta-client = { path = "../meta-client" }
|
||||
meta-client = { workspace = true }
|
||||
metrics.workspace = true
|
||||
moka = { version = "0.11", features = ["future"] }
|
||||
parking_lot = "0.12"
|
||||
regex.workspace = true
|
||||
serde = "1.0"
|
||||
serde.workspace = true
|
||||
serde_json = "1.0"
|
||||
session = { path = "../session" }
|
||||
session = { workspace = true }
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
store-api = { path = "../store-api" }
|
||||
table = { path = "../table" }
|
||||
store-api = { workspace = true }
|
||||
table = { workspace = true }
|
||||
tokio.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
catalog = { path = ".", features = ["testing"] }
|
||||
common-test-util = { path = "../common/test-util" }
|
||||
catalog = { workspace = true, features = ["testing"] }
|
||||
chrono.workspace = true
|
||||
log-store = { path = "../log-store" }
|
||||
mito = { path = "../mito", features = ["test"] }
|
||||
object-store = { path = "../object-store" }
|
||||
storage = { path = "../storage" }
|
||||
common-test-util = { workspace = true }
|
||||
log-store = { workspace = true }
|
||||
mito = { workspace = true, features = ["test"] }
|
||||
object-store = { workspace = true }
|
||||
storage = { workspace = true }
|
||||
tokio.workspace = true
|
||||
|
||||
@@ -16,26 +16,33 @@ mod columns;
|
||||
mod tables;
|
||||
|
||||
use std::any::Any;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_catalog::consts::{
|
||||
INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME,
|
||||
INFORMATION_SCHEMA_TABLES_TABLE_ID,
|
||||
};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
|
||||
use datatypes::schema::SchemaRef;
|
||||
use futures_util::StreamExt;
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::ScanRequest;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
use table::data_source::DataSource;
|
||||
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
|
||||
use table::metadata::TableType;
|
||||
use table::metadata::{TableIdent, TableInfoBuilder, TableMetaBuilder, TableType};
|
||||
use table::{Result as TableResult, Table, TableRef};
|
||||
|
||||
use self::columns::InformationSchemaColumns;
|
||||
use crate::error::Result;
|
||||
use crate::information_schema::tables::InformationSchemaTables;
|
||||
use crate::table_factory::TableFactory;
|
||||
use crate::CatalogManager;
|
||||
|
||||
const TABLES: &str = "tables";
|
||||
const COLUMNS: &str = "columns";
|
||||
pub const TABLES: &str = "tables";
|
||||
pub const COLUMNS: &str = "columns";
|
||||
|
||||
pub struct InformationSchemaProvider {
|
||||
catalog_name: String,
|
||||
@@ -49,25 +56,97 @@ impl InformationSchemaProvider {
|
||||
catalog_manager,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationSchemaProvider {
|
||||
/// Build a map of [TableRef] in information schema.
|
||||
/// Including `tables` and `columns`.
|
||||
pub fn build(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
) -> HashMap<String, TableRef> {
|
||||
let mut schema = HashMap::new();
|
||||
|
||||
schema.insert(
|
||||
TABLES.to_string(),
|
||||
Arc::new(InformationTable::new(
|
||||
catalog_name.clone(),
|
||||
INFORMATION_SCHEMA_TABLES_TABLE_ID,
|
||||
TABLES.to_string(),
|
||||
Arc::new(InformationSchemaTables::new(
|
||||
catalog_name.clone(),
|
||||
catalog_manager.clone(),
|
||||
)),
|
||||
)) as _,
|
||||
);
|
||||
schema.insert(
|
||||
COLUMNS.to_string(),
|
||||
Arc::new(InformationTable::new(
|
||||
catalog_name.clone(),
|
||||
INFORMATION_SCHEMA_COLUMNS_TABLE_ID,
|
||||
COLUMNS.to_string(),
|
||||
Arc::new(InformationSchemaColumns::new(catalog_name, catalog_manager)),
|
||||
)) as _,
|
||||
);
|
||||
|
||||
schema
|
||||
}
|
||||
|
||||
pub fn table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
let stream_builder = match name.to_ascii_lowercase().as_ref() {
|
||||
TABLES => Arc::new(InformationSchemaTables::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)) as _,
|
||||
COLUMNS => Arc::new(InformationSchemaColumns::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)) as _,
|
||||
let (stream_builder, table_id) = match name.to_ascii_lowercase().as_ref() {
|
||||
TABLES => (
|
||||
Arc::new(InformationSchemaTables::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)) as _,
|
||||
INFORMATION_SCHEMA_TABLES_TABLE_ID,
|
||||
),
|
||||
COLUMNS => (
|
||||
Arc::new(InformationSchemaColumns::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)) as _,
|
||||
INFORMATION_SCHEMA_COLUMNS_TABLE_ID,
|
||||
),
|
||||
_ => {
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Some(Arc::new(InformationTable::new(stream_builder))))
|
||||
Ok(Some(Arc::new(InformationTable::new(
|
||||
self.catalog_name.clone(),
|
||||
table_id,
|
||||
name.to_string(),
|
||||
stream_builder,
|
||||
))))
|
||||
}
|
||||
|
||||
pub fn table_factory(&self, name: &str) -> Result<Option<TableFactory>> {
|
||||
let (stream_builder, table_id) = match name.to_ascii_lowercase().as_ref() {
|
||||
TABLES => (
|
||||
Arc::new(InformationSchemaTables::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)) as _,
|
||||
INFORMATION_SCHEMA_TABLES_TABLE_ID,
|
||||
),
|
||||
COLUMNS => (
|
||||
Arc::new(InformationSchemaColumns::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)) as _,
|
||||
INFORMATION_SCHEMA_COLUMNS_TABLE_ID,
|
||||
),
|
||||
_ => {
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
let data_source = Arc::new(InformationTable::new(
|
||||
self.catalog_name.clone(),
|
||||
table_id,
|
||||
name.to_string(),
|
||||
stream_builder,
|
||||
));
|
||||
|
||||
Ok(Some(Arc::new(move || data_source.clone())))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -80,12 +159,25 @@ pub trait InformationStreamBuilder: Send + Sync {
|
||||
}
|
||||
|
||||
pub struct InformationTable {
|
||||
catalog_name: String,
|
||||
table_id: TableId,
|
||||
name: String,
|
||||
stream_builder: Arc<dyn InformationStreamBuilder>,
|
||||
}
|
||||
|
||||
impl InformationTable {
|
||||
pub fn new(stream_builder: Arc<dyn InformationStreamBuilder>) -> Self {
|
||||
Self { stream_builder }
|
||||
pub fn new(
|
||||
catalog_name: String,
|
||||
table_id: TableId,
|
||||
name: String,
|
||||
stream_builder: Arc<dyn InformationStreamBuilder>,
|
||||
) -> Self {
|
||||
Self {
|
||||
catalog_name,
|
||||
table_id,
|
||||
name,
|
||||
stream_builder,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -100,14 +192,39 @@ impl Table for InformationTable {
|
||||
}
|
||||
|
||||
fn table_info(&self) -> table::metadata::TableInfoRef {
|
||||
unreachable!("Should not call table_info() of InformationTable directly")
|
||||
let table_meta = TableMetaBuilder::default()
|
||||
.schema(self.stream_builder.schema())
|
||||
.primary_key_indices(vec![])
|
||||
.next_column_id(0)
|
||||
.build()
|
||||
.unwrap();
|
||||
Arc::new(
|
||||
TableInfoBuilder::default()
|
||||
.ident(TableIdent {
|
||||
table_id: self.table_id,
|
||||
version: 0,
|
||||
})
|
||||
.name(self.name.clone())
|
||||
.catalog_name(self.catalog_name.clone())
|
||||
.schema_name(INFORMATION_SCHEMA_NAME.to_string())
|
||||
.meta(table_meta)
|
||||
.table_type(TableType::Temporary)
|
||||
.build()
|
||||
.unwrap(),
|
||||
)
|
||||
}
|
||||
|
||||
fn table_type(&self) -> table::metadata::TableType {
|
||||
TableType::View
|
||||
fn table_type(&self) -> TableType {
|
||||
TableType::Temporary
|
||||
}
|
||||
|
||||
async fn scan_to_stream(&self, request: ScanRequest) -> TableResult<SendableRecordBatchStream> {
|
||||
self.get_stream(request)
|
||||
}
|
||||
}
|
||||
|
||||
impl DataSource for InformationTable {
|
||||
fn get_stream(&self, request: ScanRequest) -> TableResult<SendableRecordBatchStream> {
|
||||
let projection = request.projection;
|
||||
let projected_schema = if let Some(projection) = &projection {
|
||||
Arc::new(
|
||||
|
||||
@@ -16,7 +16,8 @@ use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::{
SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_PRIMARY_KEY, SEMANTIC_TYPE_TIME_INDEX,
INFORMATION_SCHEMA_NAME, SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_PRIMARY_KEY,
SEMANTIC_TYPE_TIME_INDEX,
};
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
@@ -31,7 +32,8 @@ use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{StringVectorBuilder, VectorRef};
use snafu::{OptionExt, ResultExt};

use super::InformationStreamBuilder;
use super::tables::InformationSchemaTables;
use super::{InformationStreamBuilder, COLUMNS, TABLES};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
@@ -52,19 +54,22 @@ const SEMANTIC_TYPE: &str = "semantic_type";

impl InformationSchemaColumns {
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
let schema = Arc::new(Schema::new(vec![
Self {
schema: Self::schema(),
catalog_name,
catalog_manager,
}
}

fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(COLUMN_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(DATA_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(SEMANTIC_TYPE, ConcreteDataType::string_datatype(), false),
]));
Self {
schema,
catalog_name,
catalog_manager,
}
]))
}

fn builder(&self) -> InformationSchemaColumnsBuilder {
@@ -153,9 +158,28 @@ impl InformationSchemaColumnsBuilder {
.table_names(&catalog_name, &schema_name)
.await?
{
let Some(table) = catalog_manager.table(&catalog_name, &schema_name, &table_name).await? else { continue };
let keys = &table.table_info().meta.primary_key_indices;
let schema = table.schema();
let (keys, schema) = if let Some(table) = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.await?
{
let keys = &table.table_info().meta.primary_key_indices;
let schema = table.schema();
(keys.clone(), schema)
} else {
// TODO: this specific branch is only a workaround for FrontendCatalogManager.
if schema_name == INFORMATION_SCHEMA_NAME {
if table_name == COLUMNS {
(vec![], InformationSchemaColumns::schema())
} else if table_name == TABLES {
(vec![], InformationSchemaTables::schema())
} else {
continue;
}
} else {
continue;
}
};

for (idx, column) in schema.column_schemas().iter().enumerate() {
let semantic_type = if column.is_time_index() {
SEMANTIC_TYPE_TIME_INDEX
@@ -15,7 +15,10 @@
use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_NAME;
use common_catalog::consts::{
INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME,
INFORMATION_SCHEMA_TABLES_TABLE_ID,
};
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
@@ -29,6 +32,7 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder};
use snafu::{OptionExt, ResultExt};
use table::metadata::TableType;

use super::{COLUMNS, TABLES};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
@@ -43,19 +47,22 @@ pub(super) struct InformationSchemaTables {

impl InformationSchemaTables {
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
let schema = Arc::new(Schema::new(vec![
Self {
schema: Self::schema(),
catalog_name,
catalog_manager,
}
}

pub(crate) fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
ColumnSchema::new("table_catalog", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_schema", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_name", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_type", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_id", ConcreteDataType::uint32_datatype(), true),
ColumnSchema::new("engine", ConcreteDataType::string_datatype(), true),
]));
Self {
schema,
catalog_name,
catalog_manager,
}
]))
}

fn builder(&self) -> InformationSchemaTablesBuilder {
@@ -137,9 +144,6 @@ impl InformationSchemaTablesBuilder {
.context(UpgradeWeakCatalogManagerRefSnafu)?;

for schema_name in catalog_manager.schema_names(&catalog_name).await? {
if schema_name == INFORMATION_SCHEMA_NAME {
continue;
}
if !catalog_manager
.schema_exist(&catalog_name, &schema_name)
.await?
@@ -151,16 +155,43 @@ impl InformationSchemaTablesBuilder {
.table_names(&catalog_name, &schema_name)
.await?
{
let Some(table) = catalog_manager.table(&catalog_name, &schema_name, &table_name).await? else { continue };
let table_info = table.table_info();
self.add_table(
&catalog_name,
&schema_name,
&table_name,
table.table_type(),
Some(table_info.ident.table_id),
Some(&table_info.meta.engine),
);
if let Some(table) = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.await?
{
let table_info = table.table_info();
self.add_table(
&catalog_name,
&schema_name,
&table_name,
table.table_type(),
Some(table_info.ident.table_id),
Some(&table_info.meta.engine),
);
} else {
// TODO: this specific branch is only a workaround for FrontendCatalogManager.
if schema_name == INFORMATION_SCHEMA_NAME {
if table_name == COLUMNS {
self.add_table(
&catalog_name,
&schema_name,
&table_name,
TableType::Temporary,
Some(INFORMATION_SCHEMA_COLUMNS_TABLE_ID),
None,
);
} else if table_name == TABLES {
self.add_table(
&catalog_name,
&schema_name,
&table_name,
TableType::Temporary,
Some(INFORMATION_SCHEMA_TABLES_TABLE_ID),
None,
);
}
}
};
}
}

@@ -37,6 +37,7 @@ pub mod local;
mod metrics;
pub mod remote;
pub mod system;
pub mod table_factory;
pub mod table_source;
pub mod tables;

@@ -48,7 +49,7 @@ pub trait CatalogManager: Send + Sync {
async fn start(&self) -> Result<()>;

/// Registers a catalog to catalog manager, returns whether the catalog exist before.
async fn register_catalog(&self, name: String) -> Result<bool>;
async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool>;

/// Register a schema with catalog name and schema name. Retuens whether the
/// schema registered.
@@ -217,13 +218,27 @@ pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> (u64, Vec<Reg
let mut region_number: u64 = 0;
let mut region_stats = Vec::new();

let Ok(catalog_names) = catalog_manager.catalog_names().await else { return (region_number, region_stats) };
let Ok(catalog_names) = catalog_manager.catalog_names().await else {
return (region_number, region_stats);
};
for catalog_name in catalog_names {
let Ok(schema_names) = catalog_manager.schema_names(&catalog_name).await else { continue };
let Ok(schema_names) = catalog_manager.schema_names(&catalog_name).await else {
continue;
};
for schema_name in schema_names {
let Ok(table_names) = catalog_manager.table_names(&catalog_name,&schema_name).await else { continue };
let Ok(table_names) = catalog_manager
.table_names(&catalog_name, &schema_name)
.await
else {
continue;
};
for table_name in table_names {
let Ok(Some(table)) = catalog_manager.table(&catalog_name, &schema_name, &table_name).await else { continue };
let Ok(Some(table)) = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.await
else {
continue;
};

let table_info = table.table_info();
let region_numbers = &table_info.meta.region_numbers;

@@ -43,7 +43,6 @@ use crate::error::{
SystemCatalogTypeMismatchSnafu, TableEngineNotFoundSnafu, TableExistsSnafu, TableNotExistSnafu,
TableNotFoundSnafu, UnimplementedSnafu,
};
use crate::information_schema::InformationSchemaProvider;
use crate::local::memory::MemoryCatalogManager;
use crate::system::{
decode_system_catalog, Entry, SystemCatalogTable, TableEntry, ENTRY_TYPE_INDEX, KEY_INDEX,
@@ -51,9 +50,8 @@ use crate::system::{
};
use crate::tables::SystemCatalog;
use crate::{
handle_system_table_request, CatalogManager, CatalogManagerRef, DeregisterSchemaRequest,
DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
RegisterTableRequest, RenameTableRequest,
handle_system_table_request, CatalogManager, DeregisterSchemaRequest, DeregisterTableRequest,
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
};

/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
@@ -118,11 +116,18 @@ impl LocalCatalogManager {
}

async fn init_system_catalog(&self) -> Result<()> {
// register default catalog and default schema
self.catalogs
.register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())?;
self.catalogs.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
})?;

// register SystemCatalogTable
let _ = self
.catalogs
self.catalogs
.register_catalog_sync(SYSTEM_CATALOG_NAME.to_string())?;
let _ = self.catalogs.register_schema_sync(RegisterSchemaRequest {
self.catalogs.register_schema_sync(RegisterSchemaRequest {
catalog: SYSTEM_CATALOG_NAME.to_string(),
schema: INFORMATION_SCHEMA_NAME.to_string(),
})?;
@@ -133,16 +138,7 @@ impl LocalCatalogManager {
table_id: SYSTEM_CATALOG_TABLE_ID,
table: self.system.information_schema.system.clone(),
};
let _ = self.catalogs.register_table(register_table_req).await?;

// register default catalog and default schema
let _ = self
.catalogs
.register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())?;
let _ = self.catalogs.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
})?;
self.catalogs.register_table(register_table_req).await?;

// Add numbers table for test
let numbers_table = Arc::new(NumbersTable::default());
@@ -154,8 +150,7 @@ impl LocalCatalogManager {
table: numbers_table,
};

let _ = self
.catalogs
self.catalogs
.register_table(register_number_table_req)
.await?;

@@ -230,9 +225,8 @@ impl LocalCatalogManager {
for entry in entries {
match entry {
Entry::Catalog(c) => {
let _ = self
.catalogs
.register_catalog_if_absent(c.catalog_name.clone());
self.catalogs
.register_catalog_sync(c.catalog_name.clone())?;
info!("Register catalog: {}", c.catalog_name);
}
Entry::Schema(s) => {
@@ -548,13 +542,6 @@ impl CatalogManager for LocalCatalogManager {
schema_name: &str,
table_name: &str,
) -> Result<Option<TableRef>> {
if schema_name == INFORMATION_SCHEMA_NAME {
let manager: CatalogManagerRef = self.catalogs.clone() as _;
let provider =
InformationSchemaProvider::new(catalog_name.to_string(), Arc::downgrade(&manager));
return provider.table(table_name);
}

self.catalogs
.table(catalog_name, schema_name, table_name)
.await
@@ -584,8 +571,8 @@ impl CatalogManager for LocalCatalogManager {
self.catalogs.table_names(catalog_name, schema_name).await
}

async fn register_catalog(&self, name: String) -> Result<bool> {
self.catalogs.register_catalog(name).await
async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool> {
self.catalogs.clone().register_catalog(name).await
}

fn as_any(&self) -> &dyn Any {

@@ -16,9 +16,11 @@ use std::any::Any;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, RwLock};
use std::sync::{Arc, RwLock, Weak};

use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
};
use metrics::{decrement_gauge, increment_gauge};
use snafu::OptionExt;
use table::metadata::TableId;
@@ -28,6 +30,7 @@ use table::TableRef;
use crate::error::{
CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
};
use crate::information_schema::InformationSchemaProvider;
use crate::{
CatalogManager, DeregisterSchemaRequest, DeregisterTableRequest, RegisterSchemaRequest,
RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
@@ -42,24 +45,6 @@ pub struct MemoryCatalogManager {
pub table_id: AtomicU32,
}

impl Default for MemoryCatalogManager {
fn default() -> Self {
let manager = Self {
table_id: AtomicU32::new(MIN_USER_TABLE_ID),
catalogs: Default::default(),
};

let catalog = HashMap::from([(DEFAULT_SCHEMA_NAME.to_string(), HashMap::new())]);
let _ = manager
.catalogs
.write()
.unwrap()
.insert(DEFAULT_CATALOG_NAME.to_string(), catalog);

manager
}
}

#[async_trait::async_trait]
impl TableIdProvider for MemoryCatalogManager {
async fn next_table_id(&self) -> table::error::Result<TableId> {
@@ -250,7 +235,7 @@ impl CatalogManager for MemoryCatalogManager {
.collect())
}

async fn register_catalog(&self, name: String) -> Result<bool> {
async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool> {
self.register_catalog_sync(name)
}

@@ -260,6 +245,28 @@ impl CatalogManager for MemoryCatalogManager {
}

impl MemoryCatalogManager {
/// Create a manager with some default setups
/// (e.g. default catalog/schema and information schema)
pub fn with_default_setup() -> Arc<Self> {
let manager = Arc::new(Self {
table_id: AtomicU32::new(MIN_USER_TABLE_ID),
catalogs: Default::default(),
});

// Safety: default catalog/schema is registered in order so no CatalogNotFound error will occur
manager
.register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())
.unwrap();
manager
.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
})
.unwrap();

manager
}

/// Registers a catalog and return the catalog already exist
pub fn register_catalog_if_absent(&self, name: String) -> bool {
let mut catalogs = self.catalogs.write().unwrap();
@@ -273,12 +280,13 @@ impl MemoryCatalogManager {
}
}

pub fn register_catalog_sync(&self, name: String) -> Result<bool> {
pub fn register_catalog_sync(self: &Arc<Self>, name: String) -> Result<bool> {
let mut catalogs = self.catalogs.write().unwrap();

match catalogs.entry(name) {
match catalogs.entry(name.clone()) {
Entry::Vacant(e) => {
e.insert(HashMap::new());
let catalog = self.create_catalog_entry(name);
e.insert(catalog);
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
Ok(true)
}
@@ -332,9 +340,19 @@ impl MemoryCatalogManager {
Ok(true)
}

fn create_catalog_entry(self: &Arc<Self>, catalog: String) -> SchemaEntries {
let information_schema = InformationSchemaProvider::build(
catalog,
Arc::downgrade(self) as Weak<dyn CatalogManager>,
);
let mut catalog = HashMap::new();
catalog.insert(INFORMATION_SCHEMA_NAME.to_string(), information_schema);
catalog
}

#[cfg(any(test, feature = "testing"))]
pub fn new_with_table(table: TableRef) -> Self {
let manager = Self::default();
pub fn new_with_table(table: TableRef) -> Arc<Self> {
let manager = Self::with_default_setup();
let request = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
@@ -349,7 +367,7 @@ impl MemoryCatalogManager {

/// Create a memory catalog list contains a numbers table for test
pub fn new_memory_catalog_manager() -> Result<Arc<MemoryCatalogManager>> {
Ok(Arc::new(MemoryCatalogManager::default()))
Ok(MemoryCatalogManager::with_default_setup())
}

#[cfg(test)]
@@ -392,7 +410,7 @@ mod tests {

#[tokio::test]
async fn test_mem_manager_rename_table() {
let catalog = MemoryCatalogManager::default();
let catalog = MemoryCatalogManager::with_default_setup();
let table_name = "test_table";
assert!(!catalog
.table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
@@ -456,7 +474,7 @@ mod tests {

#[tokio::test]
async fn test_catalog_rename_table() {
let catalog = MemoryCatalogManager::default();
let catalog = MemoryCatalogManager::with_default_setup();
let table_name = "num";
let table_id = 2333;
let table: TableRef = Arc::new(NumbersTable::new(table_id));
@@ -507,14 +525,14 @@ mod tests {

#[test]
pub fn test_register_if_absent() {
let list = MemoryCatalogManager::default();
let list = MemoryCatalogManager::with_default_setup();
assert!(!list.register_catalog_if_absent("test_catalog".to_string(),));
assert!(list.register_catalog_if_absent("test_catalog".to_string()));
}

#[tokio::test]
pub async fn test_catalog_deregister_table() {
let catalog = MemoryCatalogManager::default();
let catalog = MemoryCatalogManager::with_default_setup();
let table_name = "foo_table";

let register_table_req = RegisterTableRequest {
@@ -549,7 +567,7 @@ mod tests {

#[tokio::test]
async fn test_catalog_deregister_schema() {
let catalog = MemoryCatalogManager::default();
let catalog = MemoryCatalogManager::with_default_setup();

// Registers a catalog, a schema, and a table.
let catalog_name = "foo_catalog".to_string();
@@ -567,6 +585,7 @@ mod tests {
table: Arc::new(NumbersTable::default()),
};
catalog
.clone()
.register_catalog(catalog_name.clone())
.await
.unwrap();

@@ -67,7 +67,7 @@ impl RemoteCatalogManager {
backend,
system_table_requests: Default::default(),
region_alive_keepers,
memory_catalog_manager: Arc::new(MemoryCatalogManager::default()),
memory_catalog_manager: MemoryCatalogManager::with_default_setup(),
table_metadata_manager,
}
}
@@ -254,7 +254,10 @@ impl CatalogManager for RemoteCatalogManager {
let Some(table) = self
.memory_catalog_manager
.table(&request.catalog, &request.schema, &request.table_name)
.await? else { return Ok(()) };
.await?
else {
return Ok(());
};

let table_info = table.table_info();
let table_ident = TableIdent {
@@ -383,6 +386,7 @@ impl CatalogManager for RemoteCatalogManager {
if remote_catalog_exists
&& self
.memory_catalog_manager
.clone()
.register_catalog(catalog.to_string())
.await?
{
@@ -420,7 +424,7 @@ impl CatalogManager for RemoteCatalogManager {
.await
}

async fn register_catalog(&self, name: String) -> Result<bool> {
async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool> {
self.memory_catalog_manager.register_catalog_sync(name)
}

@@ -23,6 +23,7 @@ use table::engine::{CloseTableResult, EngineContext, TableEngine};
use table::metadata::TableId;
use table::requests::{
AlterTableRequest, CloseTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest,
TruncateTableRequest,
};
use table::test_util::MemTable;
use table::TableRef;
@@ -116,4 +117,12 @@ impl TableEngine for MockTableEngine {
async fn close(&self) -> table::Result<()> {
Ok(())
}

async fn truncate_table(
&self,
_ctx: &EngineContext,
_request: TruncateTableRequest,
) -> table::Result<bool> {
Ok(true)
}
}

@@ -30,7 +30,7 @@ use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::ScanRequest;
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::{TableId, TableInfoRef};
use table::metadata::{TableId, TableInfoRef, TableType};
use table::requests::{
CreateTableRequest, DeleteRequest, InsertRequest, OpenTableRequest, TableOptions,
};
@@ -71,6 +71,10 @@ impl Table for SystemCatalogTable {
self.0.table_info()
}

fn table_type(&self) -> TableType {
self.0.table_type()
}

async fn delete(&self, request: DeleteRequest) -> TableResult<usize> {
self.0.delete(request).await
}
@@ -264,7 +268,7 @@ pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) ->
let primary_key_columns = build_primary_key_columns(entry_type, key);

let mut columns_values = HashMap::with_capacity(6);
columns_values.extend(primary_key_columns.into_iter());
columns_values.extend(primary_key_columns);

let _ = columns_values.insert(
"value".to_string(),
@@ -523,7 +527,7 @@ mod tests {
EngineConfig::default(),
EngineImpl::new(
StorageEngineConfig::default(),
Arc::new(NoopLogStore::default()),
Arc::new(NoopLogStore),
object_store.clone(),
noop_compaction_scheduler,
)
@@ -574,9 +578,15 @@ mod tests {
assert_eq!(batch.num_rows(), 1);

let row = batch.rows().next().unwrap();
let Value::UInt8(entry_type) = row[0] else { unreachable!() };
let Value::Binary(key) = row[1].clone() else { unreachable!() };
let Value::Binary(value) = row[3].clone() else { unreachable!() };
let Value::UInt8(entry_type) = row[0] else {
unreachable!()
};
let Value::Binary(key) = row[1].clone() else {
unreachable!()
};
let Value::Binary(value) = row[3].clone() else {
unreachable!()
};
let entry = decode_system_catalog(Some(entry_type), Some(&*key), Some(&*value)).unwrap();
let expected = Entry::Table(TableEntry {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),

@@ -12,19 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use common_runtime::TaskFunction;
use std::sync::Arc;

struct ManifestGcTask {}
use table::data_source::DataSourceRef;

#[async_trait::async_trait]
impl TaskFunction<()> for ManifestGcTask {
/// Invoke the task.
async fn call(&mut self) -> std::result::Result<(), ()> {
todo!()
}

/// Name of the task.
fn name(&self) -> &str {
todo!()
}
}
pub type TableFactory = Arc<dyn Fn() -> DataSourceRef>;
@@ -45,8 +45,8 @@ impl DfTableSourceProvider {
catalog_manager,
disallow_cross_schema_query,
resolved_tables: HashMap::new(),
default_catalog: query_ctx.current_catalog(),
default_schema: query_ctx.current_schema(),
default_catalog: query_ctx.current_catalog().to_owned(),
default_schema: query_ctx.current_schema().to_owned(),
}
}

@@ -130,7 +130,7 @@ mod tests {
let query_ctx = &QueryContext::with("greptime", "public");

let table_provider =
DfTableSourceProvider::new(Arc::new(MemoryCatalogManager::default()), true, query_ctx);
DfTableSourceProvider::new(MemoryCatalogManager::with_default_setup(), true, query_ctx);

let table_ref = TableReference::Bare {
table: Cow::Borrowed("table_name"),

@@ -26,7 +26,9 @@ mod tests {
use catalog::remote::region_alive_keeper::RegionAliveKeepers;
use catalog::remote::{CachedMetaKvBackend, RemoteCatalogManager};
use catalog::{CatalogManager, RegisterSchemaRequest, RegisterTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MITO_ENGINE,
};
use common_meta::helper::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
use common_meta::ident::TableIdent;
use common_meta::key::TableMetadataManager;
@@ -179,12 +181,17 @@ mod tests {
catalog_manager.catalog_names().await.unwrap()
);

let mut schema_names = catalog_manager
.schema_names(DEFAULT_CATALOG_NAME)
.await
.unwrap();
schema_names.sort_unstable();
assert_eq!(
vec![DEFAULT_SCHEMA_NAME.to_string()],
catalog_manager
.schema_names(DEFAULT_CATALOG_NAME)
.await
.unwrap()
vec![
INFORMATION_SCHEMA_NAME.to_string(),
DEFAULT_SCHEMA_NAME.to_string()
],
schema_names
);
}

@@ -240,13 +247,18 @@ mod tests {
async fn test_register_table() {
let node_id = 42;
let components = prepare_components(node_id).await;
let mut schema_names = components
.catalog_manager
.schema_names(DEFAULT_CATALOG_NAME)
.await
.unwrap();
schema_names.sort_unstable();
assert_eq!(
vec![DEFAULT_SCHEMA_NAME.to_string()],
components
.catalog_manager
.schema_names(DEFAULT_CATALOG_NAME)
.await
.unwrap()
vec![
INFORMATION_SCHEMA_NAME.to_string(),
DEFAULT_SCHEMA_NAME.to_string(),
],
schema_names
);

// register a new table with an nonexistent catalog
@@ -309,21 +321,16 @@ mod tests {
// register catalog to catalog manager
assert!(components
.catalog_manager
.clone()
.register_catalog(catalog_name.clone())
.await
.is_ok());
assert_eq!(
HashSet::<String>::from_iter(
vec![DEFAULT_CATALOG_NAME.to_string(), catalog_name.clone()].into_iter()
),
HashSet::from_iter(
components
.catalog_manager
.catalog_names()
.await
.unwrap()
.into_iter()
)
HashSet::<String>::from_iter(vec![
DEFAULT_CATALOG_NAME.to_string(),
catalog_name.clone()
]),
HashSet::from_iter(components.catalog_manager.catalog_names().await.unwrap())
);

let table_to_register = components
@@ -380,7 +387,7 @@ mod tests {
.unwrap());

assert_eq!(
HashSet::from([schema_name.clone()]),
HashSet::from([schema_name.clone(), INFORMATION_SCHEMA_NAME.to_string()]),
components
.catalog_manager
.schema_names(&catalog_name)

@@ -8,20 +8,20 @@ license.workspace = true
testing = []

[dependencies]
api = { path = "../api" }
api = { workspace = true }
arrow-flight.workspace = true
async-stream.workspace = true
common-base = { path = "../common/base" }
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-time = { path = "../common/time" }
common-meta = { path = "../common/meta" }
common-telemetry = { path = "../common/telemetry" }
common-base = { workspace = true }
common-catalog = { workspace = true }
common-error = { workspace = true }
common-grpc = { workspace = true }
common-meta = { workspace = true }
common-query = { workspace = true }
common-recordbatch = { workspace = true }
common-telemetry = { workspace = true }
common-time = { workspace = true }
datafusion.workspace = true
datatypes = { path = "../datatypes" }
datatypes = { workspace = true }
enum_dispatch = "0.3"
futures-util.workspace = true
moka = { version = "0.9", features = ["future"] }
@@ -34,13 +34,13 @@ tokio.workspace = true
tonic.workspace = true

[dev-dependencies]
common-grpc-expr = { path = "../common/grpc-expr" }
datanode = { path = "../datanode" }
common-grpc-expr = { workspace = true }
datanode = { workspace = true }
derive-new = "0.5"
substrait = { path = "../common/substrait" }
prost.workspace = true
substrait = { workspace = true }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
prost.workspace = true

[dev-dependencies.substrait_proto]
package = "substrait"

@@ -13,56 +13,56 @@ path = "src/bin/greptime.rs"
default = ["metrics-process"]
tokio-console = ["common-telemetry/tokio-console"]
metrics-process = ["servers/metrics-process"]
greptimedb-telemetry = [
"datanode/greptimedb-telemetry",
"meta-srv/greptimedb-telemetry",
]

[dependencies]
anymap = "1.0.0-beta.2"
async-trait.workspace = true
catalog = { path = "../catalog" }
catalog = { workspace = true }
chrono.workspace = true
clap = { version = "3.1", features = ["derive"] }
client = { path = "../client" }
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-query = { path = "../common/query" }
common-meta = { path = "../common/meta" }
common-recordbatch = { path = "../common/recordbatch" }
common-telemetry = { path = "../common/telemetry", features = [
client = { workspace = true }
common-base = { workspace = true }
common-error = { workspace = true }
common-meta = { workspace = true }
common-query = { workspace = true }
common-recordbatch = { workspace = true }
common-telemetry = { workspace = true, features = [
"deadlock_detection",
] }
config = "0.13"
datanode = { path = "../datanode" }
datatypes = { path = "../datatypes" }
datanode = { workspace = true }
datatypes = { workspace = true }
either = "1.8"
etcd-client.workspace = true
frontend = { path = "../frontend" }
frontend = { workspace = true }
futures.workspace = true
meta-client = { path = "../meta-client" }
meta-srv = { path = "../meta-srv" }
meta-client = { workspace = true }
meta-srv = { workspace = true }
metrics.workspace = true
nu-ansi-term = "0.46"
partition = { path = "../partition" }
query = { path = "../query" }
partition = { workspace = true }
query = { workspace = true }
rand.workspace = true
rustyline = "10.1"
serde.workspace = true
servers = { path = "../servers" }
session = { path = "../session" }
servers = { workspace = true }
session = { workspace = true }
snafu.workspace = true
substrait = { path = "../common/substrait" }
table = { path = "../table" }
tikv-jemallocator = "0.5"
substrait = { workspace = true }
table = { workspace = true }
tokio.workspace = true

[target.'cfg(not(windows))'.dependencies]
tikv-jemallocator = "0.5"

[dev-dependencies]
common-test-util = { path = "../common/test-util" }
rexpect = "0.5"
temp-env = "0.3"
common-test-util = { workspace = true }
serde.workspace = true
temp-env = "0.3"
toml.workspace = true

[target.'cfg(not(windows))'.dev-dependencies]
rexpect = "0.5"

[build-dependencies]
common-version = { path = "../common/version" }
common-version = { workspace = true }

@@ -187,6 +187,7 @@ fn log_env_flags() {
}
}

#[cfg(not(windows))]
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

@@ -12,26 +12,32 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use async_trait::async_trait;
use clap::Parser;
use common_meta::helper::TableGlobalValue;
use common_meta::error as MetaError;
use common_meta::helper::{CatalogKey as v1CatalogKey, SchemaKey as v1SchemaKey, TableGlobalValue};
use common_meta::key::catalog_name::{CatalogNameKey, CatalogNameValue};
use common_meta::key::datanode_table::{DatanodeTableKey, DatanodeTableValue};
use common_meta::key::schema_name::{SchemaNameKey, SchemaNameValue};
use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
use common_meta::key::table_name::{TableNameKey, TableNameValue};
use common_meta::key::table_region::{RegionDistribution, TableRegionKey, TableRegionValue};
use common_meta::key::TableMetaKey;
use common_meta::rpc::store::{
BatchDeleteRequest, BatchPutRequest, PutRequest, RangeRequest, RangeResponse,
};
use common_meta::range_stream::PaginationStream;
use common_meta::rpc::store::{BatchDeleteRequest, BatchPutRequest, PutRequest, RangeRequest};
use common_meta::rpc::KeyValue;
use common_meta::util::get_prefix_end_key;
use common_telemetry::info;
use etcd_client::Client;
use futures::TryStreamExt;
use meta_srv::service::store::etcd::EtcdStore;
use meta_srv::service::store::kv::KvStoreRef;
use meta_srv::service::store::kv::{KvBackendAdapter, KvStoreRef};
use snafu::ResultExt;

use crate::cli::{Instance, Tool};
use crate::error::{ConnectEtcdSnafu, Result};
use crate::error::{self, ConnectEtcdSnafu, Result};

#[derive(Debug, Default, Parser)]
pub struct UpgradeCommand {
@@ -63,56 +69,167 @@ struct MigrateTableMetadata {

#[async_trait]
impl Tool for MigrateTableMetadata {
// migrates database's metadata from 0.3 to 0.4.
async fn do_work(&self) -> Result<()> {
let mut key = b"__tg".to_vec();
let range_end = get_prefix_end_key(&key);

let mut processed_keys = 0;
loop {
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));

let req = RangeRequest::new()
.with_range(key, range_end.clone())
.with_limit(1000);
let resp = self.etcd_store.range(req).await.unwrap();
for kv in resp.kvs.iter() {
let key = String::from_utf8_lossy(kv.key());
let value = TableGlobalValue::from_bytes(kv.value())
.unwrap_or_else(|e| panic!("table global value is corrupted: {e}, key: {key}"));

self.create_table_name_key(&value).await;

self.create_datanode_table_keys(&value).await;

self.split_table_global_value(&key, value).await;
}

self.delete_migrated_keys(&resp).await;

processed_keys += resp.kvs.len();

if resp.more {
key = get_prefix_end_key(resp.kvs.last().unwrap().key());
} else {
break;
}
}
info!("Total migrated TableGlobalKeys: {processed_keys}");
self.migrate_table_global_values().await?;
self.migrate_catalog_keys().await?;
self.migrate_schema_keys().await?;
Ok(())
}
}

const PAGE_SIZE: usize = 1000;

impl MigrateTableMetadata {
async fn delete_migrated_keys(&self, resp: &RangeResponse) {
info!("Deleting {} TableGlobalKeys", resp.kvs.len());
let req = BatchDeleteRequest {
keys: resp.kvs.iter().map(|kv| kv.key().to_vec()).collect(),
prev_kv: false,
};
async fn migrate_schema_keys(&self) -> Result<()> {
// The schema key prefix.
let key = b"__s".to_vec();
let range_end = get_prefix_end_key(&key);

let mut keys = Vec::new();
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
let mut stream = PaginationStream::new(
KvBackendAdapter::wrap(self.etcd_store.clone()),
RangeRequest::new().with_range(key, range_end),
PAGE_SIZE,
Arc::new(|kv: KeyValue| {
let key_str =
std::str::from_utf8(&kv.key).context(MetaError::ConvertRawKeySnafu)?;
let key = v1SchemaKey::parse(key_str)
.unwrap_or_else(|e| panic!("schema key is corrupted: {e}, key: {key_str}"));

Ok((key, ()))
}),
);
while let Some((key, _)) = stream.try_next().await.context(error::IterStreamSnafu)? {
let _ = self.migrate_schema_key(&key).await;
keys.push(key.to_string().as_bytes().to_vec());
}
info!("Total migrated SchemaKeys: {}", keys.len());
self.delete_migrated_keys(keys).await;

Ok(())
}

async fn migrate_schema_key(&self, key: &v1SchemaKey) -> Result<()> {
let new_key = SchemaNameKey::new(&key.catalog_name, &key.schema_name);
let schema_name_value = SchemaNameValue;

info!("Creating '{new_key}'");

if self.dryrun {
info!("Dryrun: do nothing");
} else {
self.etcd_store.batch_delete(req).await.unwrap();
self.etcd_store
.put(
PutRequest::new()
.with_key(new_key.as_raw_key())
.with_value(schema_name_value.try_as_raw_value().unwrap()),
)
.await
.unwrap();
}

Ok(())
}

async fn migrate_catalog_keys(&self) -> Result<()> {
// The catalog key prefix.
let key = b"__c".to_vec();
let range_end = get_prefix_end_key(&key);

let mut keys = Vec::new();
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
let mut stream = PaginationStream::new(
KvBackendAdapter::wrap(self.etcd_store.clone()),
RangeRequest::new().with_range(key, range_end),
PAGE_SIZE,
Arc::new(|kv: KeyValue| {
let key_str =
std::str::from_utf8(&kv.key).context(MetaError::ConvertRawKeySnafu)?;
let key = v1CatalogKey::parse(key_str)
.unwrap_or_else(|e| panic!("catalog key is corrupted: {e}, key: {key_str}"));

Ok((key, ()))
}),
);
while let Some((key, _)) = stream.try_next().await.context(error::IterStreamSnafu)? {
let _ = self.migrate_catalog_key(&key).await;
keys.push(key.to_string().as_bytes().to_vec());
}
info!("Total migrated CatalogKeys: {}", keys.len());
self.delete_migrated_keys(keys).await;

Ok(())
}

async fn migrate_catalog_key(&self, key: &v1CatalogKey) {
let new_key = CatalogNameKey::new(&key.catalog_name);
let catalog_name_value = CatalogNameValue;

info!("Creating '{new_key}'");

if self.dryrun {
info!("Dryrun: do nothing");
} else {
self.etcd_store
.put(
PutRequest::new()
.with_key(new_key.as_raw_key())
.with_value(catalog_name_value.try_as_raw_value().unwrap()),
)
.await
.unwrap();
}
}

async fn migrate_table_global_values(&self) -> Result<()> {
let key = b"__tg".to_vec();
let range_end = get_prefix_end_key(&key);

let mut keys = Vec::new();

info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
let mut stream = PaginationStream::new(
KvBackendAdapter::wrap(self.etcd_store.clone()),
RangeRequest::new().with_range(key, range_end.clone()),
PAGE_SIZE,
Arc::new(|kv: KeyValue| {
let key = String::from_utf8_lossy(kv.key()).to_string();
let value = TableGlobalValue::from_bytes(kv.value())
.unwrap_or_else(|e| panic!("table global value is corrupted: {e}, key: {key}"));

Ok((key, value))
}),
);
while let Some((key, value)) = stream.try_next().await.context(error::IterStreamSnafu)? {
self.create_table_name_key(&value).await;

self.create_datanode_table_keys(&value).await;

self.split_table_global_value(&key, value).await;

keys.push(key.as_bytes().to_vec());
}

info!("Total migrated TableGlobalKeys: {}", keys.len());
self.delete_migrated_keys(keys).await;

Ok(())
}

async fn delete_migrated_keys(&self, keys: Vec<Vec<u8>>) {
for keys in keys.chunks(PAGE_SIZE) {
info!("Deleting {} TableGlobalKeys", keys.len());
let req = BatchDeleteRequest {
keys: keys.to_vec(),
prev_kv: false,
};
if self.dryrun {
info!("Dryrun: do nothing");
} else {
self.etcd_store.batch_delete(req).await.unwrap();
}
}
}

@@ -16,7 +16,7 @@ use std::time::Duration;

use clap::Parser;
use common_telemetry::logging;
use datanode::datanode::{Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig};
use datanode::datanode::{Datanode, DatanodeOptions};
use meta_client::MetaClientOptions;
use servers::Mode;
use snafu::ResultExt;
@@ -143,9 +143,7 @@ impl StartCommand {
}

if let Some(data_home) = &self.data_home {
opts.storage.store = ObjectStoreConfig::File(FileConfig {
data_home: data_home.clone(),
});
opts.storage.data_home = data_home.clone();
}

if let Some(wal_dir) = &self.wal_dir {
@@ -185,7 +183,9 @@ mod tests {

use common_base::readable_size::ReadableSize;
use common_test_util::temp_dir::create_named_temp_file;
use datanode::datanode::{CompactionConfig, ObjectStoreConfig, RegionManifestConfig};
use datanode::datanode::{
CompactionConfig, FileConfig, ObjectStoreConfig, RegionManifestConfig,
};
use servers::Mode;

use super::*;
@@ -243,8 +243,10 @@ mod tests {
..Default::default()
};

let Options::Datanode(options) =
cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
let Options::Datanode(options) = cmd.load_options(TopLevelOptions::default()).unwrap()
else {
unreachable!()
};

assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!(Some(42), options.node_id);
@@ -268,16 +270,11 @@ mod tests {
assert_eq!(10000, ddl_timeout_millis);
assert_eq!(3000, timeout_millis);
assert!(tcp_nodelay);

match &options.storage.store {
ObjectStoreConfig::File(FileConfig { data_home, .. }) => {
assert_eq!("/tmp/greptimedb/", data_home)
}
ObjectStoreConfig::S3 { .. } => unreachable!(),
ObjectStoreConfig::Oss { .. } => unreachable!(),
ObjectStoreConfig::Azblob { .. } => unreachable!(),
ObjectStoreConfig::Gcs { .. } => unreachable!(),
};
assert_eq!("/tmp/greptimedb/", options.storage.data_home);
assert!(matches!(
&options.storage.store,
ObjectStoreConfig::File(FileConfig { .. })
));

assert_eq!(
CompactionConfig {
@@ -397,10 +394,10 @@ mod tests {

let env_prefix = "DATANODE_UT";
temp_env::with_vars(
vec![
[
(
// storage.manifest.gc_duration = 9s
vec![
[
env_prefix.to_string(),
"storage".to_uppercase(),
"manifest".to_uppercase(),
@@ -411,7 +408,7 @@ mod tests {
),
(
// storage.compaction.max_purge_tasks = 99
vec![
[
env_prefix.to_string(),
"storage".to_uppercase(),
"compaction".to_uppercase(),
@@ -422,7 +419,7 @@ mod tests {
),
(
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
vec![
[
env_prefix.to_string(),
"meta_client_options".to_uppercase(),
"metasrv_addrs".to_uppercase(),
@@ -440,7 +437,10 @@ mod tests {
};

let Options::Datanode(opts) =
command.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
command.load_options(TopLevelOptions::default()).unwrap()
else {
unreachable!()
};

// Should be read from env, env > default values.
assert_eq!(

@@ -23,6 +23,12 @@ use snafu::{Location, Snafu};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Failed to iter stream, source: {}", source))]
IterStream {
location: Location,
source: common_meta::error::Error,
},

#[snafu(display("Failed to start datanode, source: {}", source))]
StartDatanode {
location: Location,
@@ -176,6 +182,7 @@ impl ErrorExt for Error {
Error::ShutdownMetaServer { source, .. } => source.status_code(),
Error::BuildMetaServer { source, .. } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::IterStream { source, .. } => source.status_code(),
Error::MissingConfig { .. }
| Error::LoadLayeredConfig { .. }
| Error::IllegalConfig { .. }

@@ -257,8 +257,10 @@ mod tests {
..Default::default()
};

let Options::Frontend(opts) =
command.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
let Options::Frontend(opts) = command.load_options(TopLevelOptions::default()).unwrap()
else {
unreachable!()
};

assert_eq!(opts.http_options.as_ref().unwrap().addr, "127.0.0.1:1234");
assert_eq!(
@@ -323,8 +325,10 @@ mod tests {
..Default::default()
};

let Options::Frontend(fe_opts) =
command.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
let Options::Frontend(fe_opts) = command.load_options(TopLevelOptions::default()).unwrap()
else {
unreachable!()
};
assert_eq!(Mode::Distributed, fe_opts.mode);
assert_eq!(
"127.0.0.1:4000".to_string(),
@@ -404,10 +408,10 @@ mod tests {

let env_prefix = "FRONTEND_UT";
temp_env::with_vars(
vec![
[
(
// mysql_options.addr = 127.0.0.1:14002
vec![
[
env_prefix.to_string(),
"mysql_options".to_uppercase(),
"addr".to_uppercase(),
@@ -417,7 +421,7 @@ mod tests {
),
(
// mysql_options.runtime_size = 11
vec![
[
env_prefix.to_string(),
"mysql_options".to_uppercase(),
"runtime_size".to_uppercase(),
@@ -427,7 +431,7 @@ mod tests {
),
(
// http_options.addr = 127.0.0.1:24000
vec![
[
env_prefix.to_string(),
"http_options".to_uppercase(),
"addr".to_uppercase(),
@@ -437,7 +441,7 @@ mod tests {
),
(
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
vec![
[
env_prefix.to_string(),
"meta_client_options".to_uppercase(),
"metasrv_addrs".to_uppercase(),
@@ -458,8 +462,10 @@ mod tests {
log_dir: None,
log_level: Some("error".to_string()),
};
let Options::Frontend(fe_opts) =
command.load_options(top_level_opts).unwrap() else {unreachable!()};
let Options::Frontend(fe_opts) = command.load_options(top_level_opts).unwrap()
else {
unreachable!()
};

// Should be read from env, env > default values.
assert_eq!(fe_opts.mysql_options.as_ref().unwrap().runtime_size, 11);

@@ -187,8 +187,10 @@ mod tests {
..Default::default()
};

let Options::Metasrv(options) =
cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
let Options::Metasrv(options) = cmd.load_options(TopLevelOptions::default()).unwrap()
else {
unreachable!()
};
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!("127.0.0.1:2380".to_string(), options.store_addr);
assert_eq!(SelectorType::LoadBased, options.selector);
@@ -216,8 +218,10 @@ mod tests {
..Default::default()
};

let Options::Metasrv(options) =
cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
let Options::Metasrv(options) = cmd.load_options(TopLevelOptions::default()).unwrap()
else {
unreachable!()
};
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
assert_eq!("127.0.0.1:2379".to_string(), options.store_addr);
@@ -269,20 +273,20 @@ mod tests {

let env_prefix = "METASRV_UT";
temp_env::with_vars(
vec![
[
(
// bind_addr = 127.0.0.1:14002
vec![env_prefix.to_string(), "bind_addr".to_uppercase()].join(ENV_VAR_SEP),
[env_prefix.to_string(), "bind_addr".to_uppercase()].join(ENV_VAR_SEP),
Some("127.0.0.1:14002"),
),
(
// server_addr = 127.0.0.1:13002
vec![env_prefix.to_string(), "server_addr".to_uppercase()].join(ENV_VAR_SEP),
[env_prefix.to_string(), "server_addr".to_uppercase()].join(ENV_VAR_SEP),
Some("127.0.0.1:13002"),
),
(
// http_options.addr = 127.0.0.1:24000
vec![
[
env_prefix.to_string(),
"http_options".to_uppercase(),
"addr".to_uppercase(),
@@ -300,7 +304,10 @@ mod tests {
};

let Options::Metasrv(opts) =
command.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
command.load_options(TopLevelOptions::default()).unwrap()
else {
unreachable!()
};

// Should be read from env, env > default values.
assert_eq!(opts.bind_addr, "127.0.0.1:14002");

@@ -158,10 +158,10 @@ mod tests {
let env_prefix = "DATANODE_UT";
temp_env::with_vars(
// The following environment variables will be used to override the values in the config file.
vec![
[
(
// storage.manifest.checkpoint_margin = 99
vec![
[
env_prefix.to_string(),
"storage".to_uppercase(),
"manifest".to_uppercase(),
@@ -172,7 +172,7 @@ mod tests {
),
(
// storage.type = S3
vec![
[
env_prefix.to_string(),
"storage".to_uppercase(),
"type".to_uppercase(),
@@ -182,7 +182,7 @@ mod tests {
),
(
// storage.bucket = mybucket
vec![
[
env_prefix.to_string(),
"storage".to_uppercase(),
"bucket".to_uppercase(),
@@ -192,7 +192,7 @@ mod tests {
),
(
// storage.manifest.gc_duration = 42s
vec![
[
env_prefix.to_string(),
"storage".to_uppercase(),
"manifest".to_uppercase(),
@@ -203,7 +203,7 @@ mod tests {
),
(
// storage.manifest.checkpoint_on_startup = true
vec![
[
env_prefix.to_string(),
"storage".to_uppercase(),
"manifest".to_uppercase(),
@@ -214,7 +214,7 @@ mod tests {
),
(
// wal.dir = /other/wal/dir
vec![
[
env_prefix.to_string(),
"wal".to_uppercase(),
"dir".to_uppercase(),
@@ -224,7 +224,7 @@ mod tests {
),
(
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
vec![
[
env_prefix.to_string(),
"meta_client_options".to_uppercase(),
"metasrv_addrs".to_uppercase(),

@@ -83,6 +83,7 @@ impl SubCommand {
pub struct StandaloneOptions {
pub mode: Mode,
pub enable_memory_catalog: bool,
pub enable_telemetry: bool,
pub http_options: Option<HttpOptions>,
pub grpc_options: Option<GrpcOptions>,
pub mysql_options: Option<MysqlOptions>,
@@ -102,6 +103,7 @@ impl Default for StandaloneOptions {
Self {
mode: Mode::Standalone,
enable_memory_catalog: false,
enable_telemetry: true,
http_options: Some(HttpOptions::default()),
grpc_options: Some(GrpcOptions::default()),
mysql_options: Some(MysqlOptions::default()),
@@ -139,6 +141,7 @@ impl StandaloneOptions {
fn datanode_options(self) -> DatanodeOptions {
DatanodeOptions {
enable_memory_catalog: self.enable_memory_catalog,
enable_telemetry: self.enable_telemetry,
wal: self.wal,
storage: self.storage,
procedure: self.procedure,
@@ -423,7 +426,10 @@ mod tests {
..Default::default()
};

let Options::Standalone(options) = cmd.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
let Options::Standalone(options) = cmd.load_options(TopLevelOptions::default()).unwrap()
else {
unreachable!()
};
let fe_opts = options.fe_opts;
let dn_opts = options.dn_opts;
let logging_opts = options.logging;
@@ -484,7 +490,8 @@ mod tests {
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
log_level: Some("debug".to_string()),
})
.unwrap() else {
.unwrap()
else {
unreachable!()
};

@@ -508,10 +515,10 @@ mod tests {

let env_prefix = "STANDALONE_UT";
temp_env::with_vars(
vec![
[
(
// logging.dir = /other/log/dir
vec![
[
env_prefix.to_string(),
"logging".to_uppercase(),
"dir".to_uppercase(),
@@ -521,7 +528,7 @@ mod tests {
),
(
// logging.level = info
vec![
[
env_prefix.to_string(),
"logging".to_uppercase(),
"level".to_uppercase(),
@@ -531,7 +538,7 @@ mod tests {
),
(
// http_options.addr = 127.0.0.1:24000
vec![
[
env_prefix.to_string(),
"http_options".to_uppercase(),
"addr".to_uppercase(),
@@ -552,8 +559,10 @@ mod tests {
log_dir: None,
log_level: None,
};
let Options::Standalone(opts) =
command.load_options(top_level_opts).unwrap() else {unreachable!()};
let Options::Standalone(opts) = command.load_options(top_level_opts).unwrap()
else {
unreachable!()
};

// Should be read from env, env > default values.
assert_eq!(opts.logging.dir, "/other/log/dir");

@@ -8,7 +8,7 @@ license.workspace = true
|
||||
anymap = "1.0.0-beta.2"
|
||||
bitvec = "1.0"
|
||||
bytes = { version = "1.1", features = ["serde"] }
|
||||
common-error = { path = "../error" }
|
||||
common-error = { workspace = true }
|
||||
paste = "1.0"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
snafu.workspace = true
|
||||
|
||||
@@ -17,7 +17,7 @@ use std::ops::Deref;
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
|
||||
/// Bytes buffer.
|
||||
#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize)]
|
||||
#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Deserialize, Serialize)]
|
||||
pub struct Bytes(bytes::Bytes);
|
||||
|
||||
impl From<Bytes> for bytes::Bytes {
|
||||
@@ -80,7 +80,7 @@ impl PartialEq<Bytes> for [u8] {
|
||||
///
|
||||
/// Now this buffer is restricted to only hold valid UTF-8 string (only allow constructing `StringBytes`
|
||||
/// from String or str). We may support other encoding in the future.
|
||||
#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord)]
|
||||
#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
|
||||
pub struct StringBytes(bytes::Bytes);
|
||||
|
||||
impl StringBytes {
|
||||
|
||||
@@ -5,7 +5,7 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
common-error = { path = "../error" }
|
||||
common-error = { workspace = true }
|
||||
serde.workspace = true
|
||||
serde_json = "1.0"
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
|
||||
@@ -29,6 +29,10 @@ pub const SYSTEM_CATALOG_TABLE_ID: u32 = 0;
|
||||
pub const SCRIPTS_TABLE_ID: u32 = 1;
|
||||
/// numbers table id
|
||||
pub const NUMBERS_TABLE_ID: u32 = 2;
|
||||
/// id for information_schema.tables
|
||||
pub const INFORMATION_SCHEMA_TABLES_TABLE_ID: u32 = 3;
|
||||
/// id for information_schema.columns
|
||||
pub const INFORMATION_SCHEMA_COLUMNS_TABLE_ID: u32 = 4;
|
||||
|
||||
pub const MITO_ENGINE: &str = "mito";
|
||||
pub const IMMUTABLE_FILE_ENGINE: &str = "file";
|
||||
|
||||
@@ -32,6 +32,33 @@ pub fn build_db_string(catalog: &str, schema: &str) -> String {
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempt to parse the catalog and schema from a given database name.
///
/// The database name may come from different sources:
///
/// - MySQL `schema` name in the MySQL protocol login request: it's optional, and users
///   can switch databases with the `USE` command
/// - Postgres `database` parameter in the Postgres wire protocol, required
/// - HTTP RESTful API: the database parameter, optional
/// - gRPC: the dbname field in the header, optional but with a higher priority than the
///   original catalog/schema
///
/// When a database name is provided, we attempt to parse the catalog and schema from
/// it. We assume the format `[<catalog>-]<schema>`:
///
/// - If the `[<catalog>-]` part is not provided, we use the whole database name as the
///   schema name
/// - If `[<catalog>-]` is provided, we split the database name on `-` and use
///   `<catalog>` and `<schema>`.
pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (&str, &str) {
|
||||
let parts = db.splitn(2, '-').collect::<Vec<&str>>();
|
||||
if parts.len() == 2 {
|
||||
(parts[0], parts[1])
|
||||
} else {
|
||||
(DEFAULT_CATALOG_NAME, db)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -41,4 +68,22 @@ mod tests {
|
||||
assert_eq!("test", build_db_string(DEFAULT_CATALOG_NAME, "test"));
|
||||
assert_eq!("a0b1c2d3-test", build_db_string("a0b1c2d3", "test"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_catalog_and_schema() {
|
||||
assert_eq!(
|
||||
(DEFAULT_CATALOG_NAME, "fullschema"),
|
||||
parse_catalog_and_schema_from_db_string("fullschema")
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
("catalog", "schema"),
|
||||
parse_catalog_and_schema_from_db_string("catalog-schema")
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
("catalog", "schema1-schema2"),
|
||||
parse_catalog_and_schema_from_db_string("catalog-schema1-schema2")
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
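A quick sketch of how the `[<catalog>-]<schema>` convention above behaves at call sites; `parse_db` mirrors the splitn logic shown in the hunk, and "greptime" merely stands in for `DEFAULT_CATALOG_NAME`:

fn parse_db(db: &str) -> (&str, &str) {
    // Split on the first '-' only; later dashes stay inside the schema name.
    let mut parts = db.splitn(2, '-');
    match (parts.next(), parts.next()) {
        (Some(catalog), Some(schema)) => (catalog, schema),
        _ => ("greptime", db),
    }
}

fn main() {
    assert_eq!(parse_db("public"), ("greptime", "public"));
    assert_eq!(parse_db("my_catalog-public"), ("my_catalog", "public"));
    assert_eq!(parse_db("cat-schema-with-dash"), ("cat", "schema-with-dash"));
}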
@@ -5,8 +5,8 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
arrow.workspace = true
|
||||
arrow-schema.workspace = true
|
||||
arrow.workspace = true
|
||||
async-compression = { version = "0.3", features = [
|
||||
"bzip2",
|
||||
"gzip",
|
||||
@@ -17,19 +17,20 @@ async-compression = { version = "0.3", features = [
|
||||
] }
|
||||
async-trait.workspace = true
|
||||
bytes = "1.1"
|
||||
common-error = { path = "../error" }
|
||||
common-runtime = { path = "../runtime" }
|
||||
common-error = { workspace = true }
|
||||
common-runtime = { workspace = true }
|
||||
datafusion.workspace = true
|
||||
derive_builder.workspace = true
|
||||
futures.workspace = true
|
||||
object-store = { path = "../../object-store" }
|
||||
object-store = { workspace = true }
|
||||
orc-rust = "0.2"
|
||||
paste = "1.0"
|
||||
regex = "1.7"
|
||||
snafu.workspace = true
|
||||
tokio.workspace = true
|
||||
strum = { version = "0.21", features = ["derive"] }
|
||||
tokio-util.workspace = true
|
||||
tokio.workspace = true
|
||||
url = "2.3"
|
||||
paste = "1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
common-test-util = { path = "../test-util" }
|
||||
common-test-util = { workspace = true }
|
||||
|
||||
@@ -20,11 +20,12 @@ use async_compression::tokio::bufread::{BzDecoder, GzipDecoder, XzDecoder, ZstdD
|
||||
use async_compression::tokio::write;
|
||||
use bytes::Bytes;
|
||||
use futures::Stream;
|
||||
use strum::EnumIter;
|
||||
use tokio::io::{AsyncRead, AsyncWriteExt, BufReader};
|
||||
use tokio_util::io::{ReaderStream, StreamReader};
|
||||
|
||||
use crate::error::{self, Error, Result};
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EnumIter)]
|
||||
pub enum CompressionType {
|
||||
/// Gzip-ed file
|
||||
Gzip,
|
||||
|
||||
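The new `EnumIter` derive lets tests walk every `CompressionType` variant without a hand-maintained list. A self-contained sketch with an illustrative variant set, using strum 0.21 with the `derive` feature as in the Cargo hunk above:

use strum::{EnumIter, IntoEnumIterator};

#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumIter)]
enum CompressionKind {
    Gzip,
    Bzip2,
    Xz,
    Zstd,
    Uncompressed,
}

fn main() {
    // Iterate all variants, e.g. to exercise a round-trip test per compression type.
    for kind in CompressionKind::iter() {
        println!("{:?}", kind);
    }
}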
@@ -209,15 +209,19 @@ impl DfRecordBatchEncoder for csv::Writer<SharedBuffer> {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use common_test_util::find_workspace_path;
|
||||
|
||||
use super::*;
|
||||
use crate::file_format::{
|
||||
FileFormat, FORMAT_COMPRESSION_TYPE, FORMAT_DELIMITER, FORMAT_HAS_HEADER,
|
||||
FORMAT_SCHEMA_INFER_MAX_RECORD,
|
||||
};
|
||||
use crate::test_util::{self, format_schema, test_store};
|
||||
use crate::test_util::{format_schema, test_store};
|
||||
|
||||
fn test_data_root() -> String {
|
||||
test_util::get_data_dir("tests/csv").display().to_string()
|
||||
find_workspace_path("/src/common/datasource/tests/csv")
|
||||
.display()
|
||||
.to_string()
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
||||
@@ -167,12 +167,16 @@ impl DfRecordBatchEncoder for json::Writer<SharedBuffer, LineDelimited> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_test_util::find_workspace_path;
|
||||
|
||||
use super::*;
|
||||
use crate::file_format::{FileFormat, FORMAT_COMPRESSION_TYPE, FORMAT_SCHEMA_INFER_MAX_RECORD};
|
||||
use crate::test_util::{self, format_schema, test_store};
|
||||
use crate::test_util::{format_schema, test_store};
|
||||
|
||||
fn test_data_root() -> String {
|
||||
test_util::get_data_dir("tests/json").display().to_string()
|
||||
find_workspace_path("/src/common/datasource/tests/json")
|
||||
.display()
|
||||
.to_string()
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
||||
@@ -188,19 +188,22 @@ impl FileOpener for OrcOpener {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_test_util::find_workspace_path;
|
||||
|
||||
use super::*;
|
||||
use crate::file_format::FileFormat;
|
||||
use crate::test_util::{self, format_schema, test_store};
|
||||
use crate::test_util::{format_schema, test_store};
|
||||
|
||||
fn test_data_root() -> String {
|
||||
test_util::get_data_dir("tests/orc").display().to_string()
|
||||
find_workspace_path("/src/common/datasource/tests/orc")
|
||||
.display()
|
||||
.to_string()
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_orc_infer_schema() {
|
||||
let orc = OrcFormat::default();
|
||||
let store = test_store(&test_data_root());
|
||||
let schema = orc.infer_schema(&store, "test.orc").await.unwrap();
|
||||
let schema = OrcFormat.infer_schema(&store, "test.orc").await.unwrap();
|
||||
let formatted: Vec<_> = format_schema(schema);
|
||||
|
||||
assert_eq!(
|
||||
|
||||
@@ -158,11 +158,13 @@ impl ArrowWriterCloser for ArrowWriter<SharedBuffer> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_test_util::find_workspace_path;
|
||||
|
||||
use super::*;
|
||||
use crate::test_util::{self, format_schema, test_store};
|
||||
use crate::test_util::{format_schema, test_store};
|
||||
|
||||
fn test_data_root() -> String {
|
||||
test_util::get_data_dir("tests/parquet")
|
||||
find_workspace_path("/src/common/datasource/tests/parquet")
|
||||
.display()
|
||||
.to_string()
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::vec;
|
||||
|
||||
use common_test_util::find_workspace_path;
|
||||
use datafusion::assert_batches_eq;
|
||||
use datafusion::datasource::physical_plan::{FileOpener, FileScanConfig, FileStream, ParquetExec};
|
||||
use datafusion::execution::context::TaskContext;
|
||||
@@ -71,7 +72,7 @@ async fn test_json_opener() {
|
||||
CompressionType::Uncompressed,
|
||||
);
|
||||
|
||||
let path = &test_util::get_data_dir("tests/json/basic.json")
|
||||
let path = &find_workspace_path("/src/common/datasource/tests/json/basic.json")
|
||||
.display()
|
||||
.to_string();
|
||||
let tests = [
|
||||
@@ -111,7 +112,7 @@ async fn test_csv_opener() {
|
||||
let store = test_store("/");
|
||||
|
||||
let schema = test_basic_schema();
|
||||
let path = &test_util::get_data_dir("tests/csv/basic.csv")
|
||||
let path = &find_workspace_path("/src/common/datasource/tests/csv/basic.csv")
|
||||
.display()
|
||||
.to_string();
|
||||
let csv_conf = CsvConfigBuilder::default()
|
||||
@@ -160,7 +161,7 @@ async fn test_parquet_exec() {
|
||||
|
||||
let schema = test_basic_schema();
|
||||
|
||||
let path = &test_util::get_data_dir("tests/parquet/basic.parquet")
|
||||
let path = &find_workspace_path("/src/common/datasource/tests/parquet/basic.parquet")
|
||||
.display()
|
||||
.to_string();
|
||||
let base_config = scan_config(schema.clone(), None, path);
|
||||
@@ -181,7 +182,7 @@ async fn test_parquet_exec() {
|
||||
.await;
|
||||
|
||||
assert_batches_eq!(
|
||||
vec![
|
||||
[
|
||||
"+-----+-------+",
|
||||
"| num | str |",
|
||||
"+-----+-------+",
|
||||
@@ -196,14 +197,15 @@ async fn test_parquet_exec() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_orc_opener() {
|
||||
let root = test_util::get_data_dir("tests/orc").display().to_string();
|
||||
let root = find_workspace_path("/src/common/datasource/tests/orc")
|
||||
.display()
|
||||
.to_string();
|
||||
let store = test_store(&root);
|
||||
let orc = OrcFormat::default();
|
||||
let schema = orc.infer_schema(&store, "test.orc").await.unwrap();
|
||||
let schema = OrcFormat.infer_schema(&store, "test.orc").await.unwrap();
|
||||
let schema = Arc::new(schema);
|
||||
|
||||
let orc_opener = OrcOpener::new(store.clone(), schema.clone(), None);
|
||||
let path = &test_util::get_data_dir("/test.orc").display().to_string();
|
||||
let path = "test.orc";
|
||||
|
||||
let tests = [
|
||||
Test {
|
||||
|
||||
@@ -12,7 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow_schema::{DataType, Field, Schema, SchemaRef};
|
||||
@@ -31,13 +30,6 @@ use crate::test_util;
|
||||
|
||||
pub const TEST_BATCH_SIZE: usize = 100;
|
||||
|
||||
pub fn get_data_dir(path: &str) -> PathBuf {
|
||||
// https://doc.rust-lang.org/cargo/reference/environment-variables.html
|
||||
let dir = env!("CARGO_MANIFEST_DIR");
|
||||
|
||||
PathBuf::from(dir).join(path)
|
||||
}
|
||||
|
||||
pub fn format_schema(schema: Schema) -> Vec<String> {
|
||||
schema
|
||||
.fields()
|
||||
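The tests now resolve fixtures from the workspace root via `find_workspace_path` instead of the crate-local `CARGO_MANIFEST_DIR` helper removed above. A hedged sketch of what such a helper typically does; the Cargo.lock-walking heuristic is an assumption, and the real helper in common-test-util may differ:

use std::path::PathBuf;

fn find_workspace_path_sketch(relative: &str) -> PathBuf {
    // Walk up from the crate's manifest dir until a workspace marker is found.
    let mut dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    while !dir.join("Cargo.lock").exists() {
        if !dir.pop() {
            break; // fall back to the manifest dir if no workspace root is found
        }
    }
    // Paths like "/src/common/datasource/tests/csv" are workspace-relative.
    dir.join(relative.trim_start_matches('/'))
}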
@@ -78,6 +70,9 @@ pub fn test_basic_schema() -> SchemaRef {
|
||||
}
|
||||
|
||||
pub fn scan_config(file_schema: SchemaRef, limit: Option<usize>, filename: &str) -> FileScanConfig {
|
||||
// object_store only recognizes Unix-style paths, so make it happy.
|
||||
let filename = &filename.replace('\\', "/");
|
||||
|
||||
FileScanConfig {
|
||||
object_store_url: ObjectStoreUrl::parse("empty://").unwrap(), // won't be used
|
||||
file_schema,
|
||||
@@ -124,12 +119,7 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi
|
||||
|
||||
let written = tmp_store.read(&output_path).await.unwrap();
|
||||
let origin = store.read(origin_path).await.unwrap();
|
||||
|
||||
// ignores `\n`
|
||||
assert_eq!(
|
||||
String::from_utf8_lossy(&written).trim_end_matches('\n'),
|
||||
String::from_utf8_lossy(&origin).trim_end_matches('\n'),
|
||||
)
|
||||
assert_eq_lines(written, origin);
|
||||
}
|
||||
|
||||
pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usize) -> usize) {
|
||||
@@ -166,10 +156,19 @@ pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usiz
|
||||
|
||||
let written = tmp_store.read(&output_path).await.unwrap();
|
||||
let origin = store.read(origin_path).await.unwrap();
|
||||
assert_eq_lines(written, origin);
|
||||
}
|
||||
|
||||
// ignores `\n`
|
||||
// Ignore the CRLF difference across operating systems.
|
||||
fn assert_eq_lines(written: Vec<u8>, origin: Vec<u8>) {
|
||||
assert_eq!(
|
||||
String::from_utf8_lossy(&written).trim_end_matches('\n'),
|
||||
String::from_utf8_lossy(&origin).trim_end_matches('\n'),
|
||||
String::from_utf8(written)
|
||||
.unwrap()
|
||||
.lines()
|
||||
.collect::<Vec<_>>(),
|
||||
String::from_utf8(origin)
|
||||
.unwrap()
|
||||
.lines()
|
||||
.collect::<Vec<_>>(),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -12,50 +12,36 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_test_util::find_workspace_path;
|
||||
|
||||
use crate::test_util;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_stream_to_json() {
|
||||
let origin_path = &find_workspace_path("/src/common/datasource/tests/json/basic.json")
|
||||
.display()
|
||||
.to_string();
|
||||
|
||||
// A small threshold
|
||||
// Triggers a flush on each write
|
||||
test_util::setup_stream_to_json_test(
|
||||
&test_util::get_data_dir("tests/json/basic.json")
|
||||
.display()
|
||||
.to_string(),
|
||||
|size| size / 2,
|
||||
)
|
||||
.await;
|
||||
test_util::setup_stream_to_json_test(origin_path, |size| size / 2).await;
|
||||
|
||||
// A large threshold
|
||||
// Only triggers a flush at the end
|
||||
test_util::setup_stream_to_json_test(
|
||||
&test_util::get_data_dir("tests/json/basic.json")
|
||||
.display()
|
||||
.to_string(),
|
||||
|size| size * 2,
|
||||
)
|
||||
.await;
|
||||
test_util::setup_stream_to_json_test(origin_path, |size| size * 2).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_stream_to_csv() {
|
||||
let origin_path = &find_workspace_path("/src/common/datasource/tests/csv/basic.csv")
|
||||
.display()
|
||||
.to_string();
|
||||
|
||||
// A small threshold
|
||||
// Triggers a flush on each write
|
||||
test_util::setup_stream_to_csv_test(
|
||||
&test_util::get_data_dir("tests/csv/basic.csv")
|
||||
.display()
|
||||
.to_string(),
|
||||
|size| size / 2,
|
||||
)
|
||||
.await;
|
||||
test_util::setup_stream_to_csv_test(origin_path, |size| size / 2).await;
|
||||
|
||||
// A large threshold
|
||||
// Only triggers a flush at the end
|
||||
test_util::setup_stream_to_csv_test(
|
||||
&test_util::get_data_dir("tests/csv/basic.csv")
|
||||
.display()
|
||||
.to_string(),
|
||||
|size| size * 2,
|
||||
)
|
||||
.await;
|
||||
test_util::setup_stream_to_csv_test(origin_path, |size| size * 2).await;
|
||||
}
|
||||
|
||||
@@ -66,6 +66,7 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(windows))]
|
||||
#[test]
|
||||
fn test_parse_path_and_dir() {
|
||||
let parsed = Url::from_file_path("/to/path/file").unwrap();
|
||||
@@ -75,6 +76,16 @@ mod tests {
|
||||
assert_eq!(parsed.path(), "/to/path/");
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
#[test]
|
||||
fn test_parse_path_and_dir() {
|
||||
let parsed = Url::from_file_path("C:\\to\\path\\file").unwrap();
|
||||
assert_eq!(parsed.path(), "/C:/to/path/file");
|
||||
|
||||
let parsed = Url::from_directory_path("C:\\to\\path\\").unwrap();
|
||||
assert_eq!(parsed.path(), "/C:/to/path/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_find_dir_and_filename() {
|
||||
struct Test<'a> {
|
||||
|
||||
@@ -8,15 +8,15 @@ license.workspace = true
|
||||
proc-macro = true
|
||||
|
||||
[dependencies]
|
||||
common-telemetry = { path = "../telemetry" }
|
||||
backtrace = "0.3"
|
||||
common-telemetry = { workspace = true }
|
||||
proc-macro2 = "1.0.66"
|
||||
quote = "1.0"
|
||||
syn = "1.0"
|
||||
proc-macro2 = "1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
arc-swap = "1.0"
|
||||
common-query = { path = "../query" }
|
||||
datatypes = { path = "../../datatypes" }
|
||||
common-query = { workspace = true }
|
||||
datatypes = { workspace = true }
|
||||
snafu.workspace = true
|
||||
static_assertions = "1.1.0"
|
||||
|
||||
@@ -146,7 +146,9 @@ pub fn print_caller(args: TokenStream, input: TokenStream) -> TokenStream {
|
||||
.expect("Expected an ident!")
|
||||
.to_string();
|
||||
if ident == "depth" {
|
||||
let Lit::Int(i) = &name_value.lit else { panic!("Expected 'depth' to be a valid int!") };
|
||||
let Lit::Int(i) = &name_value.lit else {
|
||||
panic!("Expected 'depth' to be a valid int!")
|
||||
};
|
||||
depth = i.base10_parse::<usize>().expect("Invalid 'depth' value");
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -7,12 +7,12 @@ license.workspace = true
|
||||
[dependencies]
|
||||
arc-swap = "1.0"
|
||||
chrono-tz = "0.6"
|
||||
common-error = { path = "../error" }
|
||||
common-function-macro = { path = "../function-macro" }
|
||||
common-query = { path = "../query" }
|
||||
common-time = { path = "../time" }
|
||||
common-error = { workspace = true }
|
||||
common-function-macro = { workspace = true }
|
||||
common-query = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
datafusion.workspace = true
|
||||
datatypes = { path = "../../datatypes" }
|
||||
datatypes = { workspace = true }
|
||||
libc = "0.2"
|
||||
num = "0.4"
|
||||
num-traits = "0.2"
|
||||
|
||||
@@ -89,7 +89,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_function_registry() {
|
||||
let registry = FunctionRegistry::default();
|
||||
let func = Arc::new(TestAndFunction::default());
|
||||
let func = Arc::new(TestAndFunction);
|
||||
|
||||
assert!(registry.get_function("test_and").is_none());
|
||||
assert!(registry.functions().is_empty());
|
||||
|
||||
@@ -15,18 +15,64 @@
|
||||
mod pow;
|
||||
mod rate;
|
||||
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::{GeneralDataFusionSnafu, Result};
|
||||
use common_query::prelude::Signature;
|
||||
use datafusion::error::DataFusionError;
|
||||
use datafusion::logical_expr::Volatility;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::vectors::VectorRef;
|
||||
pub use pow::PowFunction;
|
||||
pub use rate::RateFunction;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use super::function::FunctionContext;
|
||||
use super::Function;
|
||||
use crate::scalars::function_registry::FunctionRegistry;
|
||||
|
||||
pub(crate) struct MathFunction;
|
||||
|
||||
impl MathFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(PowFunction::default()));
|
||||
registry.register(Arc::new(RateFunction::default()))
|
||||
registry.register(Arc::new(PowFunction));
|
||||
registry.register(Arc::new(RateFunction));
|
||||
registry.register(Arc::new(RangeFunction))
|
||||
}
|
||||
}
|
||||
|
||||
/// `RangeFunction` will never be used as a normal function;
/// it exists only so that DataFusion can generate a logical plan for `RangeSelect`.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
struct RangeFunction;
|
||||
|
||||
impl fmt::Display for RangeFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "RANGE_FN")
|
||||
}
|
||||
}
|
||||
|
||||
impl Function for RangeFunction {
|
||||
fn name(&self) -> &str {
|
||||
"range_fn"
|
||||
}
|
||||
|
||||
// range_fn will never be used; the return type can be an arbitrary value and is not important
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::float64_datatype())
|
||||
}
|
||||
|
||||
/// `range_fn` will never be used. As long as a legal signature is returned, its specific content does not matter.
/// In fact, the arguments loaded by `range_fn` are very complicated, and it is difficult to describe them with `Signature`.
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::any(0, Volatility::Immutable)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
Err(DataFusionError::Internal(
|
||||
"range_fn just a empty function used in range select, It should not be eval!".into(),
|
||||
))
|
||||
.context(GeneralDataFusionSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -85,7 +85,7 @@ mod tests {
|
||||
use super::*;
|
||||
#[test]
|
||||
fn test_pow_function() {
|
||||
let pow = PowFunction::default();
|
||||
let pow = PowFunction;
|
||||
|
||||
assert_eq!("pow", pow.name());
|
||||
assert_eq!(
|
||||
|
||||
@@ -80,7 +80,7 @@ mod tests {
|
||||
use super::*;
|
||||
#[test]
|
||||
fn test_rate_function() {
|
||||
let rate = RateFunction::default();
|
||||
let rate = RateFunction;
|
||||
assert_eq!("prom_rate", rate.name());
|
||||
assert_eq!(
|
||||
ConcreteDataType::float64_datatype(),
|
||||
|
||||
@@ -25,6 +25,6 @@ pub(crate) struct NumpyFunction;
|
||||
|
||||
impl NumpyFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(ClipFunction::default()));
|
||||
registry.register(Arc::new(ClipFunction));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -156,7 +156,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_clip_signature() {
|
||||
let clip = ClipFunction::default();
|
||||
let clip = ClipFunction;
|
||||
|
||||
assert_eq!("clip", clip.name());
|
||||
assert_eq!(
|
||||
@@ -202,8 +202,6 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_clip_fn_signed() {
|
||||
let clip = ClipFunction::default();
|
||||
|
||||
// eval with signed integers
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(Int32Vector::from_values(0..10)),
|
||||
@@ -217,7 +215,9 @@ mod tests {
|
||||
)),
|
||||
];
|
||||
|
||||
let vector = clip.eval(FunctionContext::default(), &args).unwrap();
|
||||
let vector = ClipFunction
|
||||
.eval(FunctionContext::default(), &args)
|
||||
.unwrap();
|
||||
assert_eq!(10, vector.len());
|
||||
|
||||
// clip([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 3, 6) = [3, 3, 3, 3, 4, 5, 6, 6, 6, 6]
|
||||
@@ -234,8 +234,6 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_clip_fn_unsigned() {
|
||||
let clip = ClipFunction::default();
|
||||
|
||||
// eval with unsigned integers
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(UInt8Vector::from_values(0..10)),
|
||||
@@ -249,7 +247,9 @@ mod tests {
|
||||
)),
|
||||
];
|
||||
|
||||
let vector = clip.eval(FunctionContext::default(), &args).unwrap();
|
||||
let vector = ClipFunction
|
||||
.eval(FunctionContext::default(), &args)
|
||||
.unwrap();
|
||||
assert_eq!(10, vector.len());
|
||||
|
||||
// clip([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 3, 6) = [3, 3, 3, 3, 4, 5, 6, 6, 6, 6]
|
||||
@@ -266,8 +266,6 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_clip_fn_float() {
|
||||
let clip = ClipFunction::default();
|
||||
|
||||
// eval with floats
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(Int8Vector::from_values(0..10)),
|
||||
@@ -281,7 +279,9 @@ mod tests {
|
||||
)),
|
||||
];
|
||||
|
||||
let vector = clip.eval(FunctionContext::default(), &args).unwrap();
|
||||
let vector = ClipFunction
|
||||
.eval(FunctionContext::default(), &args)
|
||||
.unwrap();
|
||||
assert_eq!(10, vector.len());
|
||||
|
||||
// clip([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 3, 6) = [3, 3, 3, 3, 4, 5, 6, 6, 6, 6]
|
||||
|
||||
@@ -291,7 +291,7 @@ mod tests {
|
||||
];
|
||||
let vector = interp(&args).unwrap();
|
||||
assert_eq!(4, vector.len());
|
||||
let res = vec![3.0, 3.0, 2.5, 0.0];
|
||||
let res = [3.0, 3.0, 2.5, 0.0];
|
||||
for (i, item) in res.iter().enumerate().take(vector.len()) {
|
||||
assert!(matches!(vector.get(i),Value::Float64(v) if v==*item));
|
||||
}
|
||||
@@ -305,7 +305,7 @@ mod tests {
|
||||
let left = vec![-1];
|
||||
let right = vec![2];
|
||||
|
||||
let expect = vec![-1.0, 3.0, 2.5, 2.0, 0.0, 2.0];
|
||||
let expect = [-1.0, 3.0, 2.5, 2.0, 0.0, 2.0];
|
||||
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(Float64Vector::from_vec(x)),
|
||||
|
||||
@@ -22,6 +22,6 @@ pub(crate) struct TimestampFunction;
|
||||
|
||||
impl TimestampFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(ToUnixtimeFunction::default()));
|
||||
registry.register(Arc::new(ToUnixtimeFunction));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -162,7 +162,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_string_to_unixtime() {
|
||||
let f = ToUnixtimeFunction::default();
|
||||
let f = ToUnixtimeFunction;
|
||||
assert_eq!("to_unixtime", f.name());
|
||||
assert_eq!(
|
||||
ConcreteDataType::int64_datatype(),
|
||||
@@ -190,7 +190,7 @@ mod tests {
|
||||
Some("2022-06-30T23:59:60Z"),
|
||||
Some("invalid_time_stamp"),
|
||||
];
|
||||
let results = vec![Some(1677652502), None, Some(1656633600), None];
|
||||
let results = [Some(1677652502), None, Some(1656633600), None];
|
||||
let args: Vec<VectorRef> = vec![Arc::new(StringVector::from(times.clone()))];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(4, vector.len());
|
||||
@@ -211,7 +211,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_int_to_unixtime() {
|
||||
let f = ToUnixtimeFunction::default();
|
||||
let f = ToUnixtimeFunction;
|
||||
assert_eq!("to_unixtime", f.name());
|
||||
assert_eq!(
|
||||
ConcreteDataType::int64_datatype(),
|
||||
@@ -234,7 +234,7 @@ mod tests {
|
||||
));
|
||||
|
||||
let times = vec![Some(3_i64), None, Some(5_i64), None];
|
||||
let results = vec![Some(3), None, Some(5), None];
|
||||
let results = [Some(3), None, Some(5), None];
|
||||
let args: Vec<VectorRef> = vec![Arc::new(Int64Vector::from(times.clone()))];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(4, vector.len());
|
||||
@@ -255,7 +255,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_timestamp_to_unixtime() {
|
||||
let f = ToUnixtimeFunction::default();
|
||||
let f = ToUnixtimeFunction;
|
||||
assert_eq!("to_unixtime", f.name());
|
||||
assert_eq!(
|
||||
ConcreteDataType::int64_datatype(),
|
||||
@@ -283,7 +283,7 @@ mod tests {
|
||||
Some(TimestampSecond::new(42)),
|
||||
None,
|
||||
];
|
||||
let results = vec![Some(123), None, Some(42), None];
|
||||
let results = [Some(123), None, Some(42), None];
|
||||
let ts_vector: TimestampSecondVector = build_vector_from_slice(×);
|
||||
let args: Vec<VectorRef> = vec![Arc::new(ts_vector)];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
|
||||
@@ -77,7 +77,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_create_udf() {
|
||||
let f = Arc::new(TestAndFunction::default());
|
||||
let f = Arc::new(TestAndFunction);
|
||||
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(ConstantVector::new(
|
||||
|
||||
@@ -6,19 +6,22 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-trait.workspace = true
|
||||
common-error = { path = "../error" }
|
||||
common-runtime = { path = "../runtime" }
|
||||
common-telemetry = { path = "../telemetry" }
|
||||
common-error = { workspace = true }
|
||||
common-runtime = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
reqwest = { version = "0.11", features = [
|
||||
"json",
|
||||
"rustls-tls",
|
||||
], default-features = false }
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
tokio.workspace = true
|
||||
reqwest = { version = "0.11", features = ["json"] }
|
||||
uuid.workspace = true
|
||||
once_cell = "1.17.0"
|
||||
|
||||
[dev-dependencies]
|
||||
common-test-util = { workspace = true }
|
||||
hyper = { version = "0.14", features = ["full"] }
|
||||
common-test-util = { path = "../test-util" }
|
||||
tempfile.workspace = true
|
||||
|
||||
[build-dependencies]
|
||||
common-version = { path = "../version" }
|
||||
common-version = { workspace = true }
|
||||
|
||||
@@ -14,30 +14,26 @@
|
||||
|
||||
use std::env;
|
||||
use std::io::ErrorKind;
|
||||
use std::path::PathBuf;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::Duration;
|
||||
|
||||
use common_runtime::error::{Error, Result};
|
||||
use common_runtime::{BoxedTaskFunction, RepeatedTask, Runtime, TaskFunction};
|
||||
use common_telemetry::debug;
|
||||
use once_cell::sync::Lazy;
|
||||
use common_telemetry::{debug, info};
|
||||
use reqwest::{Client, Response};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
pub const TELEMETRY_URL: &str = "https://api-preview.greptime.cloud/db/otel/statistics";
|
||||
|
||||
// Getting the right path when running on windows
|
||||
static TELEMETRY_UUID_FILE_NAME: Lazy<PathBuf> = Lazy::new(|| {
|
||||
let mut path = PathBuf::new();
|
||||
path.push(env::temp_dir());
|
||||
path.push(".greptimedb-telemetry-uuid");
|
||||
path
|
||||
});
|
||||
/// The URL to report telemetry data.
|
||||
pub const TELEMETRY_URL: &str = "https://api.greptime.cloud/db/otel/statistics";
|
||||
/// The local installation uuid cache file
|
||||
const UUID_FILE_NAME: &str = ".greptimedb-telemetry-uuid";
|
||||
|
||||
/// The default interval of reporting telemetry data to greptime cloud
|
||||
pub static TELEMETRY_INTERVAL: Duration = Duration::from_secs(60 * 30);
|
||||
|
||||
/// The default connect timeout to greptime cloud.
|
||||
const GREPTIMEDB_TELEMETRY_CLIENT_CONNECT_TIMEOUT: Duration = Duration::from_secs(10);
|
||||
const GREPTIMEDB_TELEMETRY_CLIENT_TIMEOUT: Duration = Duration::from_secs(10);
|
||||
/// The default request timeout to greptime cloud.
|
||||
const GREPTIMEDB_TELEMETRY_CLIENT_REQUEST_TIMEOUT: Duration = Duration::from_secs(10);
|
||||
|
||||
pub enum GreptimeDBTelemetryTask {
|
||||
Enable(RepeatedTask<Error>),
|
||||
@@ -54,6 +50,8 @@ impl GreptimeDBTelemetryTask {
|
||||
}
|
||||
|
||||
pub fn start(&self, runtime: Runtime) -> Result<()> {
|
||||
print_anonymous_usage_data_disclaimer();
|
||||
|
||||
match self {
|
||||
GreptimeDBTelemetryTask::Enable(task) => task.start(runtime),
|
||||
GreptimeDBTelemetryTask::Disable => Ok(()),
|
||||
@@ -68,14 +66,22 @@ impl GreptimeDBTelemetryTask {
|
||||
}
|
||||
}
|
||||
|
||||
/// Telemetry data to report
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
struct StatisticData {
|
||||
/// Operating system name, such as `linux`, `windows` etc.
|
||||
pub os: String,
|
||||
/// The greptimedb version
|
||||
pub version: String,
|
||||
/// The architecture of the CPU, such as `x86`, `x86_64` etc.
|
||||
pub arch: String,
|
||||
/// The running mode, `standalone` or `distributed`.
|
||||
pub mode: Mode,
|
||||
/// The git commit revision of greptimedb
|
||||
pub git_commit: String,
|
||||
/// The node number
|
||||
pub nodes: Option<i32>,
|
||||
/// The local installation uuid
|
||||
pub uuid: String,
|
||||
}
|
||||
|
||||
@@ -116,14 +122,14 @@ pub trait Collector {
|
||||
|
||||
async fn get_nodes(&self) -> Option<i32>;
|
||||
|
||||
fn get_uuid(&mut self) -> Option<String> {
|
||||
fn get_uuid(&mut self, working_home: &Option<String>) -> Option<String> {
|
||||
match self.get_uuid_cache() {
|
||||
Some(uuid) => Some(uuid),
|
||||
None => {
|
||||
if self.get_retry() > 3 {
|
||||
return None;
|
||||
}
|
||||
match default_get_uuid() {
|
||||
match default_get_uuid(working_home) {
|
||||
Some(uuid) => {
|
||||
self.set_uuid_cache(uuid.clone());
|
||||
Some(uuid)
|
||||
@@ -138,8 +144,26 @@ pub trait Collector {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn default_get_uuid() -> Option<String> {
|
||||
let path = (*TELEMETRY_UUID_FILE_NAME).as_path();
|
||||
fn print_anonymous_usage_data_disclaimer() {
|
||||
info!("Attention: GreptimeDB now collects anonymous usage data to help improve its roadmap and prioritize features.");
|
||||
info!(
|
||||
"To learn more about this anonymous program and how to deactivate it if you don't want to participate, please visit the following URL: ");
|
||||
info!("https://docs.greptime.com/reference/telemetry");
|
||||
}
|
||||
|
||||
pub fn default_get_uuid(working_home: &Option<String>) -> Option<String> {
|
||||
let temp_dir = env::temp_dir();
|
||||
|
||||
let mut path = PathBuf::new();
|
||||
path.push(
|
||||
working_home
|
||||
.as_ref()
|
||||
.map(Path::new)
|
||||
.unwrap_or_else(|| temp_dir.as_path()),
|
||||
);
|
||||
path.push(UUID_FILE_NAME);
|
||||
|
||||
let path = path.as_path();
|
||||
match std::fs::read(path) {
|
||||
Ok(bytes) => Some(String::from_utf8_lossy(&bytes).to_string()),
|
||||
Err(e) => {
|
||||
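`default_get_uuid` is a read-or-create cache keyed by the new `working_home`. A rough sketch of the pattern, assuming the uuid crate's v4 feature for generation and best-effort persistence; the real code lets the caller retry up to three times, as shown above:

use std::path::PathBuf;

fn get_or_create_uuid(working_home: &Option<String>) -> Option<String> {
    let dir = working_home
        .as_ref()
        .map(PathBuf::from)
        .unwrap_or_else(std::env::temp_dir);
    let path = dir.join(".greptimedb-telemetry-uuid");

    match std::fs::read(&path) {
        Ok(bytes) => Some(String::from_utf8_lossy(&bytes).to_string()),
        Err(_) => {
            let uuid = uuid::Uuid::new_v4().to_string();
            // Persist for next time; if the write fails, give up and let the caller retry.
            std::fs::write(&path, uuid.as_bytes()).ok()?;
            Some(uuid)
        }
    }
}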
@@ -164,6 +188,7 @@ pub fn default_get_uuid() -> Option<String> {
|
||||
pub struct GreptimeDBTelemetry {
|
||||
statistics: Box<dyn Collector + Send + Sync>,
|
||||
client: Option<Client>,
|
||||
working_home: Option<String>,
|
||||
telemetry_url: &'static str,
|
||||
}
|
||||
|
||||
@@ -180,12 +205,13 @@ impl TaskFunction<Error> for GreptimeDBTelemetry {
|
||||
}
|
||||
|
||||
impl GreptimeDBTelemetry {
|
||||
pub fn new(statistics: Box<dyn Collector + Send + Sync>) -> Self {
|
||||
pub fn new(working_home: Option<String>, statistics: Box<dyn Collector + Send + Sync>) -> Self {
|
||||
let client = Client::builder()
|
||||
.connect_timeout(GREPTIMEDB_TELEMETRY_CLIENT_CONNECT_TIMEOUT)
|
||||
.timeout(GREPTIMEDB_TELEMETRY_CLIENT_TIMEOUT)
|
||||
.timeout(GREPTIMEDB_TELEMETRY_CLIENT_REQUEST_TIMEOUT)
|
||||
.build();
|
||||
Self {
|
||||
working_home,
|
||||
statistics,
|
||||
client: client.ok(),
|
||||
telemetry_url: TELEMETRY_URL,
|
||||
@@ -193,7 +219,7 @@ impl GreptimeDBTelemetry {
|
||||
}
|
||||
|
||||
pub async fn report_telemetry_info(&mut self) -> Option<Response> {
|
||||
match self.statistics.get_uuid() {
|
||||
match self.statistics.get_uuid(&self.working_home) {
|
||||
Some(uuid) => {
|
||||
let data = StatisticData {
|
||||
os: self.statistics.get_os(),
|
||||
@@ -206,7 +232,7 @@ impl GreptimeDBTelemetry {
|
||||
};
|
||||
|
||||
if let Some(client) = self.client.as_ref() {
|
||||
debug!("report version: {:?}", data);
|
||||
info!("reporting greptimedb version: {:?}", data);
|
||||
let result = client.post(self.telemetry_url).json(&data).send().await;
|
||||
debug!("report version result: {:?}", result);
|
||||
result.ok()
|
||||
@@ -232,7 +258,7 @@ mod tests {
|
||||
use reqwest::Client;
|
||||
use tokio::spawn;
|
||||
|
||||
use crate::{Collector, GreptimeDBTelemetry, Mode, StatisticData};
|
||||
use crate::{default_get_uuid, Collector, GreptimeDBTelemetry, Mode, StatisticData};
|
||||
|
||||
static COUNT: AtomicUsize = std::sync::atomic::AtomicUsize::new(0);
|
||||
|
||||
@@ -300,7 +326,7 @@ mod tests {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn get_uuid(&mut self) -> Option<String> {
|
||||
fn get_uuid(&mut self, _working_home: &Option<String>) -> Option<String> {
|
||||
Some("test".to_string())
|
||||
}
|
||||
}
|
||||
@@ -331,13 +357,19 @@ mod tests {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn get_uuid(&mut self) -> Option<String> {
|
||||
fn get_uuid(&mut self, _working_home: &Option<String>) -> Option<String> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
let working_home_temp = tempfile::Builder::new()
|
||||
.prefix("greptimedb_telemetry")
|
||||
.tempdir()
|
||||
.unwrap();
|
||||
let working_home = working_home_temp.path().to_str().unwrap().to_string();
|
||||
|
||||
let test_statistic = Box::new(TestStatistic);
|
||||
let mut test_report = GreptimeDBTelemetry::new(test_statistic);
|
||||
let mut test_report = GreptimeDBTelemetry::new(Some(working_home.clone()), test_statistic);
|
||||
let url = Box::leak(format!("{}:{}", "http://localhost", port).into_boxed_str());
|
||||
test_report.telemetry_url = url;
|
||||
let response = test_report.report_telemetry_info().await.unwrap();
|
||||
@@ -351,7 +383,7 @@ mod tests {
|
||||
assert_eq!(1, body.nodes.unwrap());
|
||||
|
||||
let failed_statistic = Box::new(FailedStatistic);
|
||||
let mut failed_report = GreptimeDBTelemetry::new(failed_statistic);
|
||||
let mut failed_report = GreptimeDBTelemetry::new(Some(working_home), failed_statistic);
|
||||
failed_report.telemetry_url = url;
|
||||
let response = failed_report.report_telemetry_info().await;
|
||||
assert!(response.is_none());
|
||||
@@ -368,4 +400,18 @@ mod tests {
|
||||
assert_eq!("1", body);
|
||||
tx.send(()).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_uuid() {
|
||||
let working_home_temp = tempfile::Builder::new()
|
||||
.prefix("greptimedb_telemetry")
|
||||
.tempdir()
|
||||
.unwrap();
|
||||
let working_home = working_home_temp.path().to_str().unwrap().to_string();
|
||||
|
||||
let uuid = default_get_uuid(&Some(working_home.clone()));
|
||||
assert!(uuid.is_some());
|
||||
assert_eq!(uuid, default_get_uuid(&Some(working_home.clone())));
|
||||
assert_eq!(uuid, default_get_uuid(&Some(working_home.clone())));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,17 +5,17 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
api = { path = "../../api" }
|
||||
api = { workspace = true }
|
||||
async-trait.workspace = true
|
||||
common-base = { path = "../base" }
|
||||
common-catalog = { path = "../catalog" }
|
||||
common-error = { path = "../error" }
|
||||
common-query = { path = "../query" }
|
||||
common-telemetry = { path = "../telemetry" }
|
||||
common-time = { path = "../time" }
|
||||
datatypes = { path = "../../datatypes" }
|
||||
common-base = { workspace = true }
|
||||
common-catalog = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-query = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
datatypes = { workspace = true }
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
table = { path = "../../table" }
|
||||
table = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
paste = "1.0"
|
||||
|
||||
@@ -24,16 +24,19 @@ use api::v1::{
|
||||
use common_base::BitVec;
|
||||
use common_time::time::Time;
|
||||
use common_time::timestamp::Timestamp;
|
||||
use common_time::{Date, DateTime};
|
||||
use common_time::{Date, DateTime, Interval};
|
||||
use datatypes::data_type::{ConcreteDataType, DataType};
|
||||
use datatypes::prelude::{ValueRef, VectorRef};
|
||||
use datatypes::scalars::ScalarVector;
|
||||
use datatypes::schema::SchemaRef;
|
||||
use datatypes::types::{Int16Type, Int8Type, TimeType, TimestampType, UInt16Type, UInt8Type};
|
||||
use datatypes::types::{
|
||||
Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
|
||||
};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{
|
||||
BinaryVector, BooleanVector, DateTimeVector, DateVector, Float32Vector, Float64Vector,
|
||||
Int32Vector, Int64Vector, PrimitiveVector, StringVector, TimeMicrosecondVector,
|
||||
Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
|
||||
IntervalYearMonthVector, PrimitiveVector, StringVector, TimeMicrosecondVector,
|
||||
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
|
||||
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
|
||||
UInt64Vector,
|
||||
@@ -216,6 +219,25 @@ fn collect_column_values(column_datatype: ColumnDataType, values: &Values) -> Ve
|
||||
Time::new_nanosecond(*v)
|
||||
))
|
||||
}
|
||||
ColumnDataType::IntervalYearMonth => {
|
||||
collect_values!(values.interval_year_month_values, |v| {
|
||||
ValueRef::Interval(Interval::from_i32(*v))
|
||||
})
|
||||
}
|
||||
ColumnDataType::IntervalDayTime => {
|
||||
collect_values!(values.interval_day_time_values, |v| {
|
||||
ValueRef::Interval(Interval::from_i64(*v))
|
||||
})
|
||||
}
|
||||
ColumnDataType::IntervalMonthDayNano => {
|
||||
collect_values!(values.interval_month_day_nano_values, |v| {
|
||||
ValueRef::Interval(Interval::from_month_day_nano(
|
||||
v.months,
|
||||
v.days,
|
||||
v.nanoseconds,
|
||||
))
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -424,10 +446,22 @@ fn values_to_vector(data_type: &ConcreteDataType, values: Values) -> VectorRef {
|
||||
)),
|
||||
},
|
||||
|
||||
ConcreteDataType::Interval(_)
|
||||
| ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::List(_)
|
||||
| ConcreteDataType::Dictionary(_) => {
|
||||
ConcreteDataType::Interval(unit) => match unit {
|
||||
IntervalType::YearMonth(_) => Arc::new(IntervalYearMonthVector::from_vec(
|
||||
values.interval_year_month_values,
|
||||
)),
|
||||
IntervalType::DayTime(_) => Arc::new(IntervalDayTimeVector::from_vec(
|
||||
values.interval_day_time_values,
|
||||
)),
|
||||
IntervalType::MonthDayNano(_) => {
|
||||
Arc::new(IntervalMonthDayNanoVector::from_iter_values(
|
||||
values.interval_month_day_nano_values.iter().map(|x| {
|
||||
Interval::from_month_day_nano(x.months, x.days, x.nanoseconds).to_i128()
|
||||
}),
|
||||
))
|
||||
}
|
||||
},
|
||||
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
@@ -556,10 +590,28 @@ fn convert_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> {
|
||||
.map(|v| Value::Time(Time::new_nanosecond(v)))
|
||||
.collect(),
|
||||
|
||||
ConcreteDataType::Interval(_)
|
||||
| ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::List(_)
|
||||
| ConcreteDataType::Dictionary(_) => {
|
||||
ConcreteDataType::Interval(IntervalType::YearMonth(_)) => values
|
||||
.interval_year_month_values
|
||||
.into_iter()
|
||||
.map(|v| Value::Interval(Interval::from_i32(v)))
|
||||
.collect(),
|
||||
ConcreteDataType::Interval(IntervalType::DayTime(_)) => values
|
||||
.interval_day_time_values
|
||||
.into_iter()
|
||||
.map(|v| Value::Interval(Interval::from_i64(v)))
|
||||
.collect(),
|
||||
ConcreteDataType::Interval(IntervalType::MonthDayNano(_)) => values
|
||||
.interval_month_day_nano_values
|
||||
.into_iter()
|
||||
.map(|v| {
|
||||
Value::Interval(Interval::from_month_day_nano(
|
||||
v.months,
|
||||
v.days,
|
||||
v.nanoseconds,
|
||||
))
|
||||
})
|
||||
.collect(),
|
||||
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
@@ -576,15 +628,16 @@ mod tests {
|
||||
|
||||
use api::helper::ColumnDataTypeWrapper;
|
||||
use api::v1::column::Values;
|
||||
use api::v1::{Column, ColumnDataType, SemanticType};
|
||||
use api::v1::{Column, ColumnDataType, IntervalMonthDayNano, SemanticType};
|
||||
use common_base::BitVec;
|
||||
use common_catalog::consts::MITO_ENGINE;
|
||||
use common_time::interval::IntervalUnit;
|
||||
use common_time::timestamp::{TimeUnit, Timestamp};
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, SchemaBuilder};
|
||||
use datatypes::types::{
|
||||
TimeMillisecondType, TimeSecondType, TimeType, TimestampMillisecondType,
|
||||
TimestampSecondType, TimestampType,
|
||||
IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType, TimeMillisecondType,
|
||||
TimeSecondType, TimeType, TimestampMillisecondType, TimestampSecondType, TimestampType,
|
||||
};
|
||||
use datatypes::value::Value;
|
||||
use paste::paste;
|
||||
@@ -642,8 +695,8 @@ mod tests {
|
||||
);
|
||||
|
||||
let column_defs = create_expr.column_defs;
|
||||
assert_eq!(column_defs[4].name, create_expr.time_index);
|
||||
assert_eq!(5, column_defs.len());
|
||||
assert_eq!(column_defs[5].name, create_expr.time_index);
|
||||
assert_eq!(6, column_defs.len());
|
||||
|
||||
assert_eq!(
|
||||
ConcreteDataType::string_datatype(),
|
||||
@@ -701,6 +754,20 @@ mod tests {
|
||||
)
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
column_defs
|
||||
.iter()
|
||||
.find(|c| c.name == "interval")
|
||||
.unwrap()
|
||||
.datatype
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
ConcreteDataType::from(
|
||||
@@ -734,7 +801,7 @@ mod tests {
|
||||
|
||||
let add_columns = find_new_columns(&schema, &insert_batch.0).unwrap().unwrap();
|
||||
|
||||
assert_eq!(3, add_columns.add_columns.len());
|
||||
assert_eq!(4, add_columns.add_columns.len());
|
||||
let host_column = &add_columns.add_columns[0];
|
||||
assert!(host_column.is_key);
|
||||
|
||||
@@ -767,6 +834,19 @@ mod tests {
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
|
||||
let interval_column = &add_columns.add_columns[3];
|
||||
assert!(!interval_column.is_key);
|
||||
|
||||
assert_eq!(
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
interval_column.column_def.as_ref().unwrap().datatype
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1011,6 +1091,70 @@ mod tests {
|
||||
assert_eq!(expect, actual);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convert_interval_values() {
|
||||
// year_month
|
||||
let actual = convert_values(
|
||||
&ConcreteDataType::Interval(IntervalType::YearMonth(IntervalYearMonthType)),
|
||||
Values {
|
||||
interval_year_month_values: vec![1_i32, 2_i32, 3_i32],
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
let expect = vec![
|
||||
Value::Interval(Interval::from_year_month(1_i32)),
|
||||
Value::Interval(Interval::from_year_month(2_i32)),
|
||||
Value::Interval(Interval::from_year_month(3_i32)),
|
||||
];
|
||||
assert_eq!(expect, actual);
|
||||
|
||||
// day_time
|
||||
let actual = convert_values(
|
||||
&ConcreteDataType::Interval(IntervalType::DayTime(IntervalDayTimeType)),
|
||||
Values {
|
||||
interval_day_time_values: vec![1_i64, 2_i64, 3_i64],
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
let expect = vec![
|
||||
Value::Interval(Interval::from_i64(1_i64)),
|
||||
Value::Interval(Interval::from_i64(2_i64)),
|
||||
Value::Interval(Interval::from_i64(3_i64)),
|
||||
];
|
||||
assert_eq!(expect, actual);
|
||||
|
||||
// month_day_nano
|
||||
let actual = convert_values(
|
||||
&ConcreteDataType::Interval(IntervalType::MonthDayNano(IntervalMonthDayNanoType)),
|
||||
Values {
|
||||
interval_month_day_nano_values: vec![
|
||||
IntervalMonthDayNano {
|
||||
months: 1,
|
||||
days: 2,
|
||||
nanoseconds: 3,
|
||||
},
|
||||
IntervalMonthDayNano {
|
||||
months: 5,
|
||||
days: 6,
|
||||
nanoseconds: 7,
|
||||
},
|
||||
IntervalMonthDayNano {
|
||||
months: 9,
|
||||
days: 10,
|
||||
nanoseconds: 11,
|
||||
},
|
||||
],
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
let expect = vec![
|
||||
Value::Interval(Interval::from_month_day_nano(1, 2, 3)),
|
||||
Value::Interval(Interval::from_month_day_nano(5, 6, 7)),
|
||||
Value::Interval(Interval::from_month_day_nano(9, 10, 11)),
|
||||
];
|
||||
assert_eq!(expect, actual);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_null() {
|
||||
let null_mask = BitVec::from_slice(&[0b0000_0001, 0b0000_1000]);
|
||||
@@ -1076,6 +1220,28 @@ mod tests {
|
||||
datatype: ColumnDataType::TimeMillisecond as i32,
|
||||
};
|
||||
|
||||
let interval1 = IntervalMonthDayNano {
|
||||
months: 1,
|
||||
days: 2,
|
||||
nanoseconds: 3,
|
||||
};
|
||||
let interval2 = IntervalMonthDayNano {
|
||||
months: 4,
|
||||
days: 5,
|
||||
nanoseconds: 6,
|
||||
};
|
||||
let interval_vals = Values {
|
||||
interval_month_day_nano_values: vec![interval1, interval2],
|
||||
..Default::default()
|
||||
};
|
||||
let interval_column = Column {
|
||||
column_name: "interval".to_string(),
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
values: Some(interval_vals),
|
||||
null_mask: vec![0],
|
||||
datatype: ColumnDataType::IntervalMonthDayNano as i32,
|
||||
};
|
||||
|
||||
let ts_vals = Values {
|
||||
ts_millisecond_values: vec![100, 101],
|
||||
..Default::default()
|
||||
@@ -1089,7 +1255,14 @@ mod tests {
|
||||
};
|
||||
|
||||
(
|
||||
vec![host_column, cpu_column, mem_column, time_column, ts_column],
|
||||
vec![
|
||||
host_column,
|
||||
cpu_column,
|
||||
mem_column,
|
||||
time_column,
|
||||
interval_column,
|
||||
ts_column,
|
||||
],
|
||||
row_count,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -5,18 +5,18 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
api = { path = "../../api" }
|
||||
api = { workspace = true }
|
||||
arrow-flight.workspace = true
|
||||
async-trait = "0.1"
|
||||
backtrace = "0.3"
|
||||
common-base = { path = "../base" }
|
||||
common-error = { path = "../error" }
|
||||
common-recordbatch = { path = "../recordbatch" }
|
||||
common-runtime = { path = "../runtime" }
|
||||
common-telemetry = { path = "../telemetry" }
|
||||
common-base = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-recordbatch = { workspace = true }
|
||||
common-runtime = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
dashmap = "5.4"
|
||||
datafusion.workspace = true
|
||||
datatypes = { path = "../../datatypes" }
|
||||
datatypes = { workspace = true }
|
||||
flatbuffers = "23.1"
|
||||
futures = "0.3"
|
||||
lazy_static.workspace = true
|
||||
|
||||
@@ -249,7 +249,9 @@ mod test {
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(flight_data.len(), 3);
|
||||
let [d1, d2, d3] = flight_data.as_slice() else { unreachable!() };
|
||||
let [d1, d2, d3] = flight_data.as_slice() else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
let decoder = &mut FlightDecoder::default();
|
||||
assert!(decoder.schema.is_none());
|
||||
@@ -263,19 +265,25 @@ mod test {
|
||||
|
||||
let message = decoder.try_decode(d1.clone()).unwrap();
|
||||
assert!(matches!(message, FlightMessage::Schema(_)));
|
||||
let FlightMessage::Schema(decoded_schema) = message else { unreachable!() };
|
||||
let FlightMessage::Schema(decoded_schema) = message else {
|
||||
unreachable!()
|
||||
};
|
||||
assert_eq!(decoded_schema, schema);
|
||||
|
||||
let _ = decoder.schema.as_ref().unwrap();
|
||||
|
||||
let message = decoder.try_decode(d2.clone()).unwrap();
|
||||
assert!(matches!(message, FlightMessage::Recordbatch(_)));
|
||||
let FlightMessage::Recordbatch(actual_batch) = message else { unreachable!() };
|
||||
let FlightMessage::Recordbatch(actual_batch) = message else {
|
||||
unreachable!()
|
||||
};
|
||||
assert_eq!(actual_batch, batch1);
|
||||
|
||||
let message = decoder.try_decode(d3.clone()).unwrap();
|
||||
assert!(matches!(message, FlightMessage::Recordbatch(_)));
|
||||
let FlightMessage::Recordbatch(actual_batch) = message else { unreachable!() };
|
||||
let FlightMessage::Recordbatch(actual_batch) = message else {
|
||||
unreachable!()
|
||||
};
|
||||
assert_eq!(actual_batch, batch2);
|
||||
}
|
||||
|
||||
|
||||
@@ -12,12 +12,14 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::helper::convert_i128_to_interval;
|
||||
use api::v1::column::Values;
|
||||
use common_base::BitVec;
|
||||
use datatypes::types::{TimeType, TimestampType, WrapperType};
|
||||
use datatypes::types::{IntervalType, TimeType, TimestampType, WrapperType};
|
||||
use datatypes::vectors::{
|
||||
BinaryVector, BooleanVector, DateTimeVector, DateVector, Float32Vector, Float64Vector,
|
||||
Int16Vector, Int32Vector, Int64Vector, Int8Vector, StringVector, TimeMicrosecondVector,
|
||||
Int16Vector, Int32Vector, Int64Vector, Int8Vector, IntervalDayTimeVector,
|
||||
IntervalMonthDayNanoVector, IntervalYearMonthVector, StringVector, TimeMicrosecondVector,
|
||||
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
|
||||
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt16Vector,
|
||||
UInt32Vector, UInt64Vector, UInt8Vector, VectorRef,
|
||||
@@ -68,7 +70,7 @@ macro_rules! convert_arrow_array_to_grpc_vals {
|
||||
return Ok(vals);
|
||||
},
|
||||
)+
|
||||
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_)| ConcreteDataType::Interval(_) => unreachable!("Should not send {:?} in gRPC", $data_type),
|
||||
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => unreachable!("Should not send {:?} in gRPC", $data_type),
|
||||
}
|
||||
}};
|
||||
}
|
||||
@@ -192,6 +194,24 @@ pub fn values(arrays: &[VectorRef]) -> Result<Values> {
|
||||
TimeNanosecondVector,
|
||||
time_nanosecond_values,
|
||||
|x| { x.into_native() }
|
||||
),
|
||||
(
|
||||
ConcreteDataType::Interval(IntervalType::YearMonth(_)),
|
||||
IntervalYearMonthVector,
|
||||
interval_year_month_values,
|
||||
|x| { x.into_native() }
|
||||
),
|
||||
(
|
||||
ConcreteDataType::Interval(IntervalType::DayTime(_)),
|
||||
IntervalDayTimeVector,
|
||||
interval_day_time_values,
|
||||
|x| { x.into_native() }
|
||||
),
|
||||
(
|
||||
ConcreteDataType::Interval(IntervalType::MonthDayNano(_)),
|
||||
IntervalMonthDayNanoVector,
|
||||
interval_month_day_nano_values,
|
||||
|x| { convert_i128_to_interval(x.into_native()) }
|
||||
)
|
||||
)
|
||||
}
|
||||
@@ -222,6 +242,43 @@ mod tests {
|
||||
assert_eq!(vec![1, 2, 3], values.time_second_values);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convert_arrow_array_interval_year_month() {
|
||||
let array = IntervalYearMonthVector::from(vec![Some(1), Some(2), None, Some(3)]);
|
||||
let array: VectorRef = Arc::new(array);
|
||||
|
||||
let values = values(&[array]).unwrap();
|
||||
|
||||
assert_eq!(vec![1, 2, 3], values.interval_year_month_values);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convert_arrow_array_interval_day_time() {
|
||||
let array = IntervalDayTimeVector::from(vec![Some(1), Some(2), None, Some(3)]);
|
||||
let array: VectorRef = Arc::new(array);
|
||||
|
||||
let values = values(&[array]).unwrap();
|
||||
|
||||
assert_eq!(vec![1, 2, 3], values.interval_day_time_values);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convert_arrow_array_interval_month_day_nano() {
|
||||
let array = IntervalMonthDayNanoVector::from(vec![Some(1), Some(2), None, Some(3)]);
|
||||
let array: VectorRef = Arc::new(array);
|
||||
|
||||
let values = values(&[array]).unwrap();
|
||||
|
||||
(0..3).for_each(|i| {
|
||||
assert_eq!(values.interval_month_day_nano_values[i].months, 0);
|
||||
assert_eq!(values.interval_month_day_nano_values[i].days, 0);
|
||||
assert_eq!(
|
||||
values.interval_month_day_nano_values[i].nanoseconds,
|
||||
i as i64 + 1
|
||||
);
|
||||
})
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convert_arrow_arrays_string() {
|
||||
let array = StringVector::from(vec![
|
||||
|
||||
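The tests above show raw i128 vector values 1, 2, 3 decoding to nanoseconds 1, 2, 3 with zero months and days, i.e. a month-day-nano interval packed as months in the high 32 bits, days in the next 32, and nanoseconds in the low 64. A sketch of that packing under this assumed layout; the actual `convert_i128_to_interval` / `Interval::to_i128` may differ in details such as sign handling:

fn pack_month_day_nano(months: i32, days: i32, nanoseconds: i64) -> i128 {
    ((months as i128) << 96)
        | (((days as i128) & 0xFFFF_FFFF) << 64)
        | ((nanoseconds as i128) & 0xFFFF_FFFF_FFFF_FFFF)
}

fn unpack_month_day_nano(v: i128) -> (i32, i32, i64) {
    ((v >> 96) as i32, (v >> 64) as i32, v as i64)
}

fn main() {
    // Raw value 1 is an interval of 1 nanosecond, matching the vector test above.
    assert_eq!(unpack_month_day_nano(1), (0, 0, 1));
    let packed = pack_month_day_nano(1, 2, 3);
    assert_eq!(unpack_month_day_nano(packed), (1, 2, 3));
}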
@@ -5,12 +5,14 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
common-error = { path = "../error" }
|
||||
common-error = { workspace = true }
|
||||
snafu.workspace = true
|
||||
tempfile = "3.4"
|
||||
tikv-jemalloc-ctl = { version = "0.5", features = ["use_std"] }
|
||||
tokio.workspace = true
|
||||
|
||||
[dependencies.tikv-jemalloc-sys]
|
||||
version = "0.5"
|
||||
[target.'cfg(not(windows))'.dependencies]
|
||||
tikv-jemalloc-ctl = { version = "0.5", features = ["use_std"] }
|
||||
|
||||
[target.'cfg(not(windows))'.dependencies.tikv-jemalloc-sys]
|
||||
features = ["stats", "profiling", "unprefixed_malloc_on_supported_platforms"]
|
||||
version = "0.5"
|
||||
|
||||
@@ -13,51 +13,28 @@
// limitations under the License.

use std::any::Any;
use std::path::PathBuf;

use common_error::ext::ErrorExt;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use snafu::{Location, Snafu};
use snafu::Snafu;

pub type Result<T> = std::result::Result<T, Error>;

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    #[snafu(display("Failed to read OPT_PROF, source: {}", source))]
    ReadOptProf { source: tikv_jemalloc_ctl::Error },
    #[snafu(display("{source}"))]
    Internal { source: BoxedError },

    #[snafu(display("Memory profiling is not enabled"))]
    ProfilingNotEnabled,

    #[snafu(display("Failed to build temp file from given path: {:?}", path))]
    BuildTempPath { path: PathBuf, location: Location },

    #[snafu(display("Failed to open temp file: {}, source: {}", path, source))]
    OpenTempFile {
        path: String,
        source: std::io::Error,
    },

    #[snafu(display(
        "Failed to dump profiling data to temp file: {:?}, source: {}",
        path,
        source
    ))]
    DumpProfileData {
        path: PathBuf,
        source: tikv_jemalloc_ctl::Error,
    },
    #[snafu(display("Memory profiling is not supported"))]
    ProfilingNotSupported,
}

impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::ReadOptProf { .. } => StatusCode::Internal,
            Error::ProfilingNotEnabled => StatusCode::InvalidArguments,
            Error::BuildTempPath { .. } => StatusCode::Internal,
            Error::OpenTempFile { .. } => StatusCode::StorageUnavailable,
            Error::DumpProfileData { .. } => StatusCode::StorageUnavailable,
            Error::Internal { source } => source.status_code(),
            Error::ProfilingNotSupported => StatusCode::Unsupported,
        }
    }
src/common/mem-prof/src/jemalloc.rs (new file, 76 lines)
@@ -0,0 +1,76 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod error;

use std::ffi::{c_char, CString};
use std::path::PathBuf;

use error::{
    BuildTempPathSnafu, DumpProfileDataSnafu, OpenTempFileSnafu, ProfilingNotEnabledSnafu,
    ReadOptProfSnafu,
};
use snafu::{ensure, ResultExt};
use tokio::io::AsyncReadExt;

use crate::error::Result;

const PROF_DUMP: &[u8] = b"prof.dump\0";
const OPT_PROF: &[u8] = b"opt.prof\0";

pub async fn dump_profile() -> Result<Vec<u8>> {
    ensure!(is_prof_enabled()?, ProfilingNotEnabledSnafu);
    let tmp_path = tempfile::tempdir().map_err(|_| {
        BuildTempPathSnafu {
            path: std::env::temp_dir(),
        }
        .build()
    })?;

    let mut path_buf = PathBuf::from(tmp_path.path());
    path_buf.push("greptimedb.hprof");

    let path = path_buf
        .to_str()
        .ok_or_else(|| BuildTempPathSnafu { path: &path_buf }.build())?
        .to_string();

    let mut bytes = CString::new(path.as_str())
        .map_err(|_| BuildTempPathSnafu { path: &path_buf }.build())?
        .into_bytes_with_nul();

    {
        // #safety: we always expect a valid temp file path to write profiling data to.
        let ptr = bytes.as_mut_ptr() as *mut c_char;
        unsafe {
            tikv_jemalloc_ctl::raw::write(PROF_DUMP, ptr)
                .context(DumpProfileDataSnafu { path: path_buf })?
        }
    }

    let mut f = tokio::fs::File::open(path.as_str())
        .await
        .context(OpenTempFileSnafu { path: &path })?;
    let mut buf = vec![];
    let _ = f
        .read_to_end(&mut buf)
        .await
        .context(OpenTempFileSnafu { path })?;
    Ok(buf)
}

fn is_prof_enabled() -> Result<bool> {
    // safety: OPT_PROF variable, if present, is always a boolean value.
    Ok(unsafe { tikv_jemalloc_ctl::raw::read::<bool>(OPT_PROF).context(ReadOptProfSnafu)? })
}
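For context, a minimal caller sketch (not part of this diff) that dumps the current jemalloc heap profile and persists it. The crate name common_mem_prof is assumed from the src/common/mem-prof path, and jemalloc profiling still has to be enabled at process start for opt.prof to read back true, otherwise dump_profile returns the ProfilingNotEnabled error above.

// Hypothetical usage sketch under the assumptions stated above.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Ask jemalloc to dump the current heap profile and get the bytes back.
    let profile = common_mem_prof::dump_profile().await?;
    // Persist it somewhere a tool like jeprof can pick it up.
    tokio::fs::write("/tmp/greptimedb.hprof", profile).await?;
    Ok(())
}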
src/common/mem-prof/src/jemalloc/error.rs (new file, 73 lines)
@@ -0,0 +1,73 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;
use std::path::PathBuf;

use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    #[snafu(display("Failed to read OPT_PROF, source: {}", source))]
    ReadOptProf { source: tikv_jemalloc_ctl::Error },

    #[snafu(display("Memory profiling is not enabled"))]
    ProfilingNotEnabled,

    #[snafu(display("Failed to build temp file from given path: {:?}", path))]
    BuildTempPath { path: PathBuf, location: Location },

    #[snafu(display("Failed to open temp file: {}, source: {}", path, source))]
    OpenTempFile {
        path: String,
        source: std::io::Error,
    },

    #[snafu(display(
        "Failed to dump profiling data to temp file: {:?}, source: {}",
        path,
        source
    ))]
    DumpProfileData {
        path: PathBuf,
        source: tikv_jemalloc_ctl::Error,
    },
}

impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::ReadOptProf { .. } => StatusCode::Internal,
            Error::ProfilingNotEnabled => StatusCode::InvalidArguments,
            Error::BuildTempPath { .. } => StatusCode::Internal,
            Error::OpenTempFile { .. } => StatusCode::StorageUnavailable,
            Error::DumpProfileData { .. } => StatusCode::StorageUnavailable,
        }
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}

impl From<Error> for crate::error::Error {
    fn from(e: Error) -> Self {
        Self::Internal {
            source: BoxedError::new(e),
        }
    }
}
@@ -14,62 +14,12 @@

pub mod error;

use std::ffi::{c_char, CString};
use std::path::PathBuf;

use snafu::{ensure, ResultExt};
use tokio::io::AsyncReadExt;

use crate::error::{
    BuildTempPathSnafu, DumpProfileDataSnafu, OpenTempFileSnafu, ProfilingNotEnabledSnafu,
    ReadOptProfSnafu,
};

const PROF_DUMP: &[u8] = b"prof.dump\0";
const OPT_PROF: &[u8] = b"opt.prof\0";
#[cfg(not(windows))]
mod jemalloc;
#[cfg(not(windows))]
pub use jemalloc::dump_profile;

#[cfg(windows)]
pub async fn dump_profile() -> error::Result<Vec<u8>> {
    ensure!(is_prof_enabled()?, ProfilingNotEnabledSnafu);
    let tmp_path = tempfile::tempdir().map_err(|_| {
        BuildTempPathSnafu {
            path: std::env::temp_dir(),
        }
        .build()
    })?;

    let mut path_buf = PathBuf::from(tmp_path.path());
    path_buf.push("greptimedb.hprof");

    let path = path_buf
        .to_str()
        .ok_or_else(|| BuildTempPathSnafu { path: &path_buf }.build())?
        .to_string();

    let mut bytes = CString::new(path.as_str())
        .map_err(|_| BuildTempPathSnafu { path: &path_buf }.build())?
        .into_bytes_with_nul();

    {
        // #safety: we always expect a valid temp file path to write profiling data to.
        let ptr = bytes.as_mut_ptr() as *mut c_char;
        unsafe {
            tikv_jemalloc_ctl::raw::write(PROF_DUMP, ptr)
                .context(DumpProfileDataSnafu { path: path_buf })?
        }
    }

    let mut f = tokio::fs::File::open(path.as_str())
        .await
        .context(OpenTempFileSnafu { path: &path })?;
    let mut buf = vec![];
    let _ = f
        .read_to_end(&mut buf)
        .await
        .context(OpenTempFileSnafu { path })?;
    Ok(buf)
}

fn is_prof_enabled() -> error::Result<bool> {
    // safety: OPT_PROF variable, if present, is always a boolean value.
    Ok(unsafe { tikv_jemalloc_ctl::raw::read::<bool>(OPT_PROF).context(ReadOptProfSnafu)? })
    error::ProfilingNotSupportedSnafu.fail()
}
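With this split, Windows builds get a stub dump_profile that fails with ProfilingNotSupported. A hedged caller-side sketch, not part of the diff, of how a handler could branch on the status code exposed by ErrorExt (the common_mem_prof crate name is assumed from the directory path):

// Hypothetical sketch only; assumes the crates are importable under these names.
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;

async fn handle_mem_prof_request() -> Result<Vec<u8>, String> {
    match common_mem_prof::dump_profile().await {
        Ok(profile) => Ok(profile),
        // On Windows builds the stub always ends up here.
        Err(e) if e.status_code() == StatusCode::Unsupported => {
            Err("memory profiling is not supported on this platform".to_string())
        }
        Err(e) => Err(e.to_string()),
    }
}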
@@ -5,14 +5,14 @@ edition.workspace = true
license.workspace = true

[dependencies]
api = { path = "../../api" }
api = { workspace = true }
async-stream.workspace = true
async-trait.workspace = true
common-catalog = { path = "../catalog" }
common-error = { path = "../error" }
common-runtime = { path = "../runtime" }
common-telemetry = { path = "../telemetry" }
common-time = { path = "../time" }
common-catalog = { workspace = true }
common-error = { workspace = true }
common-runtime = { workspace = true }
common-telemetry = { workspace = true }
common-time = { workspace = true }
etcd-client.workspace = true
futures.workspace = true
lazy_static.workspace = true
@@ -21,12 +21,11 @@ regex.workspace = true
serde.workspace = true
serde_json.workspace = true
snafu.workspace = true
store-api = { path = "../../store-api" }
table = { path = "../../table" }
store-api = { workspace = true }
table = { workspace = true }
tokio.workspace = true
reqwest = { version = "0.11", features = ["json"] }

[dev-dependencies]
chrono.workspace = true
datatypes = { path = "../../datatypes" }
datatypes = { workspace = true }
hyper = { version = "0.14", features = ["full"] }
@@ -123,7 +123,8 @@ impl DatanodeTableManager {
        let Some(curr) = resp
            .prev_kv
            .map(|kv| DatanodeTableValue::try_from_raw_value(kv.value))
            .transpose()? else {
            .transpose()?
        else {
            return UnexpectedSnafu {
                err_msg: format!("compare_and_put expect None but failed with current value None, key: {key}, val: {val:?}"),
            }.fail();
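This hunk and the similar ones below are formatting-only: rustfmt on the newer toolchain puts the else of a multi-line let-else on its own line after the method chain. A small standalone illustration of the resulting shape (example code, not taken from this repository):

// Illustrative only: post-rustfmt layout of a multi-line let-else.
fn first_positive(values: &[i32]) -> Option<i32> {
    let Some(&found) = values
        .iter()
        .find(|v| **v > 0)
    else {
        // Bail out when no element matches.
        return None;
    };
    Some(found)
}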
@@ -81,7 +81,7 @@ impl TableInfoManager {
        let Some(curr) = curr else {
            return UnexpectedSnafu {
                err_msg: format!("compare_and_put expect None but failed with current value None, table_id: {table_id}, table_info: {table_info:?}"),
            }.fail()
            }.fail();
        };
        ensure!(
            &curr.table_info == table_info,
@@ -161,6 +161,13 @@ mod tests {
    use crate::kv_backend::KvBackend;
    use crate::rpc::store::PutRequest;

    #[test]
    fn test_deserialization_compatibility() {
        let s = r#"{"version":1,"table_info":{"ident":{"table_id":8714,"version":0},"name":"go_gc_duration_seconds","desc":"Created on insertion","catalog_name":"e87lehzy63d4cloud_docs_test","schema_name":"public","meta":{"schema":{"column_schemas":[{"name":"instance","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"job","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"quantile","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"greptime_timestamp","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}},{"name":"greptime_value","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}}],"timestamp_index":3,"version":0},"primary_key_indices":[0,1,2],"value_indices":[],"engine":"mito","next_column_id":5,"region_numbers":[],"engine_options":{},"options":{"write_buffer_size":null,"ttl":null,"extra_options":{}},"created_on":"1970-01-01T00:00:00Z"},"table_type":"Base"}}"#;
        let v = TableInfoValue::try_from_raw_value(s.as_bytes().to_vec()).unwrap();
        assert!(v.table_info.meta.partition_key_indices.is_empty());
    }

    #[tokio::test]
    async fn test_table_info_manager() {
        let backend = Arc::new(MemoryKvBackend::default());
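The compatibility test above feeds in older stored JSON that lacks newer fields such as partition_key_indices and expects deserialization to still succeed with an empty value. Presumably the metadata struct marks such fields with a serde default; a hypothetical sketch of that pattern (not the actual struct from this diff):

// Hedged sketch: a field added after data was already persisted can be made
// backward compatible by letting serde fill it with Default when absent.
use serde::Deserialize;

#[derive(Deserialize)]
struct TableMetaCompat {
    #[serde(default)]
    partition_key_indices: Vec<usize>,
    // ...other fields elided in this sketch
}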
@@ -163,10 +163,11 @@ impl TableNameManager {
        let Some(curr) = result
            .prev_kv
            .map(|x| TableNameValue::try_from_raw_value(x.value))
            .transpose()? else {
            .transpose()?
        else {
            return UnexpectedSnafu {
                err_msg: format!("compare_and_put expect None but failed with current value None, key: {key}, value: {value:?}"),
            }.fail()
            }.fail();
        };
        ensure!(
            curr.table_id == table_id,
@@ -226,7 +227,8 @@ impl TableNameManager {
            // name, then the table must not exist at the first place.
            return TableNotExistSnafu {
                table_name: TableName::from(key).to_string(),
            }.fail();
            }
            .fail();
        };

        ensure!(
@@ -91,7 +91,7 @@ impl TableRegionManager {
        let Some(curr) = curr else {
            return UnexpectedSnafu {
                err_msg: format!("compare_and_put expect None but failed with current value None, table_id: {table_id}, region_distribution: {region_distribution:?}"),
            }.fail()
            }.fail();
        };
        ensure!(
            &curr.region_distribution == region_distribution,
@@ -236,7 +236,7 @@ impl<T: ErrorExt + Send + Sync + 'static> KvBackend for MemoryKvBackend<T> {
                start: key,
                end: range_end,
            };
            kvs.drain_filter(|key, _| range.contains(key))
            kvs.extract_if(|key, _| range.contains(key))
                .map(Into::into)
                .collect::<Vec<_>>()
        };
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#![feature(btree_drain_filter)]
#![feature(btree_extract_if)]

pub mod error;
pub mod heartbeat;
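The two hunks above track the nightly rename of BTreeMap::drain_filter to extract_if, with the feature gate renamed from btree_drain_filter to btree_extract_if accordingly. A minimal standalone sketch of the renamed API as it looked on nightlies of that period, where extract_if takes only a predicate (this example is illustrative and needs a nightly toolchain):

#![feature(btree_extract_if)]

use std::collections::BTreeMap;

fn main() {
    let mut kvs: BTreeMap<Vec<u8>, Vec<u8>> = BTreeMap::new();
    kvs.insert(b"a/1".to_vec(), b"v1".to_vec());
    kvs.insert(b"b/1".to_vec(), b"v2".to_vec());

    // Like the old drain_filter, extract_if removes matching entries from the
    // map and yields them as an iterator of (key, value) pairs.
    let drained: Vec<_> = kvs.extract_if(|key, _| key.starts_with(b"a/")).collect();
    assert_eq!(drained.len(), 1);
    assert_eq!(kvs.len(), 1);
}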
@@ -39,6 +39,7 @@ pub fn get_next_prefix_key(key: &[u8]) -> Vec<u8> {

#[cfg(test)]
mod tests {

    use super::*;

    #[test]
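The body of get_next_prefix_key is not shown in this hunk. For orientation only, a hypothetical sketch of the usual shape of such a helper for prefix range scans, not the code from this diff:

// Hypothetical sketch: the conventional "next prefix" computation increments
// the last byte below 0xFF and truncates everything after it, so the range
// [prefix, next_prefix) covers exactly the keys sharing that prefix.
fn next_prefix_sketch(key: &[u8]) -> Vec<u8> {
    let mut next = key.to_vec();
    while let Some(last) = next.last_mut() {
        if *last < 0xFF {
            *last += 1;
            return next;
        }
        next.pop();
    }
    // All bytes were 0xFF: there is no finite upper bound, callers usually
    // treat the empty key as "scan to the end".
    next
}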
@@ -1,16 +0,0 @@
[package]
name = "common-pprof"
version.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
common-error = { path = "../error" }
pprof = { version = "0.11", features = [
    "flamegraph",
    "prost-codec",
    "protobuf",
] }
prost.workspace = true
snafu.workspace = true
tokio.workspace = true
Some files were not shown because too many files have changed in this diff.