Compare commits


56 Commits

Author SHA1 Message Date
discord9
d815bdf770 refactor: per reviews 2024-05-27 14:27:21 +08:00
discord9
0b324563ac fix: range begin>range end 2024-05-27 14:27:21 +08:00
discord9
ffecb6882e mend 2024-05-27 14:27:21 +08:00
discord9
c185242997 fix: window_start when ts<start_time 2024-05-27 14:27:21 +08:00
discord9
9fda415b0d fix: ts<start time correct window 2024-05-27 14:27:21 +08:00
discord9
5eaf9816b9 fix: send buf clear 2024-05-27 14:27:21 +08:00
discord9
6684f8dce3 fix: test of tumble 2024-05-27 14:27:21 +08:00
discord9
e8660a6f7e fix: expire state 2024-05-27 14:27:21 +08:00
discord9
6659f3cc62 fix: reorder write requests 2024-05-27 14:27:21 +08:00
discord9
d218d65361 fix: default timestamp name 2024-05-27 14:27:21 +08:00
discord9
8f40ba42c1 feat: rename default ts to GREPTIME_TIMESTAMP 2024-05-27 14:27:21 +08:00
discord9
d1ce436442 fix(WIP): choose 2024-05-27 14:27:21 +08:00
discord9
e580ba63ec fix: optional args of tumble 2024-05-27 14:27:21 +08:00
LFC
297105266b feat: enable tcp keepalive for http server (#4019)
* feat: enable tcp keepalive for http server

* chore: for enterprise's update

* resolve PR comments
2024-05-27 04:07:36 +00:00
Ruihang Xia
1de17aec74 feat: change EXPIRE WHEN to EXPIRE AFTER (#4002)
* feat: change EXPIRE WHEN to EXPIRE AFTER

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change remaining

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* rename create_if_not_exist to create_if_not_exists

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* parse interval expr

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update comment

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Apply suggestions from code review

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Jeremyhi <jiachun_feng@proton.me>
2024-05-27 04:05:55 +00:00
Weny Xu
389ded93d1 chore: add logs for setting the region to writable (#4044)
* chore: add logs for setting the region to writable

* fix: ignore redundant logs
2024-05-27 04:01:40 +00:00
Eugene Tolbakov
af486ec0d0 feat(operator): check if a database is in use before dropping it (#4035)
feat(operator): check if database is in use before dropping it
2024-05-27 03:31:58 +00:00
irenjj
25d64255a3 feat: support table level comment (#4042)
* feat: support table level comment

* use constants

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
Co-authored-by: tison <wander4096@gmail.com>
2024-05-27 02:28:52 +00:00
tison
3790020d78 build(deps): upgrade promql-parser to 0.4 (#4047)
* build(deps): upgrade promql-parser to 0.4

Signed-off-by: tison <wander4096@gmail.com>

* lock

Signed-off-by: tison <wander4096@gmail.com>

* catch up upgrades

Signed-off-by: tison <wander4096@gmail.com>

* concise method

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-05-27 01:51:59 +00:00
Weny Xu
5df3d4e5da feat: implement the LogStoreRawEntryReader and RawEntryReaderFilter (#4030)
* feat: implement the `LogStoreRawEntryReader`

* feat: implement the `RawEntryReaderFilter`

* test: add tests
2024-05-24 11:53:15 +00:00
tison
af670df515 ci: skip notification for manual releases (#4033)
Signed-off-by: tison <wander4096@gmail.com>
2024-05-24 10:16:06 +00:00
Ruihang Xia
a58256d4d3 feat: round-robin selector (#4024)
* feat: implement round robin peer selector

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add document and test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-24 07:29:07 +00:00
Weny Xu
466f7c6448 feat: add RawEntryReader and OneshotWalEntryReader trait (#4027)
* feat: add `RawEntryReader` and `OneShotWalEntryReader` trait

* chore: rename `OneShot` to `Oneshot`

* refactor: remove `region_id` from `OneshotWalEntryReader`
2024-05-24 06:30:50 +00:00
Ruihang Xia
0101657649 feat: remove one clone on constructing partition (#4028)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-24 04:01:19 +00:00
taobo
a3a2c8d063 feat: Add TLS support for gRPC service (#3957)
* feat: Add tls support for grpc service

* feat: add integration test

* fix: integration test

* fix: revert by suggestion

* fix: typos

* fix: optimize code

* fix: optimize code

* docs: update configs
2024-05-23 19:00:16 +00:00
Yingwen
dfc1acbb2a fix: notifies all workers once a region is flushed (#4016)
* fix: notify workers to handle stalled requests if flush is finished

* chore: change stalled count to gauge

* feat: process stalled requests eagerly
2024-05-23 12:45:00 +00:00
Lei, HUANG
0d055b6ee6 refactor: remove unused log config (#4021) 2024-05-23 08:59:42 +00:00
Weny Xu
614643ef7b chore(ci): add more replicas (#4015) 2024-05-23 02:43:24 +00:00
Ning Sun
b90b7adf6f feat: add fallback logic for vmagent sending wrong content type (#4009)
* feat: add fallback logic for vmagent sending wrong content type

* fix: resolve lint issues

* Update src/servers/src/http/prom_store.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2024-05-23 02:40:17 +00:00
Jeremyhi
418090b464 chore: log error for detail (#4011)
* chore: log error for detail

* chore: by cr
2024-05-22 12:17:20 +00:00
Lei, HUANG
090b59e8d6 feat: manual compaction (#3988)
* add compaction udf params

* wip: pass compaction options through grpc

* wip: pass compaction options all the way down to region server

* wip: window compaction task

* feat: trigger major compaction

* refactor: optimize compaction parameter parsing

* chore: rebase main

* chore: update proto

* chore: add some tests

* feat: validate catalog

* chore: fix typo and rebase main

* fix: some cr comments

* fix: file_time_bucket_span

* fix: avoid upper bound overflow

* chore: update proto
2024-05-22 09:42:21 +00:00
shuiyisong
9e1af79637 chore: add ttl to write_cache (#4010)
* chore: add ttl to write_cache

* chore: update test & add example config

* chore: fix typo

* chore: fix typo

* chore: fix typo
2024-05-22 06:50:12 +00:00
Yohan Wal
9800807fe5 fix(fuzz): sort inserted rows with primary keys and time index (#4008)
* fix(fuzz): sort inserted rows with primary keys and time index

* fix: correct index when replacing default

* fix: put null behind all values
2024-05-22 03:32:19 +00:00
zyy17
b86d79b906 fix: can't print log because the tracing guard is dropped (#4005)
* fix: avoid logging guard drop

* chore: remove unused '#[allow(dead_code)]'
2024-05-22 03:24:40 +00:00
Lei, HUANG
e070ba3c32 feat: respect time range when building parquet reader (#3947)
* feat: convert timestamp range filters to predicates

* chore: rebase main

* fix: remove predicates once they have been added to timestamp filters to avoid duplicate filtering

* fix: some comments

* fix: resolve conflicts
2024-05-21 16:02:25 +00:00
Weny Xu
43bf7bffd0 fix: try to fix unstable fuzz test (#4003)
fix: ignore PoolTimedOut
2024-05-21 12:57:09 +00:00
Weny Xu
56aed6e6ff chore(ci): export kind logs (#3996)
* chore(ci): export kind logs

* chore: add empty line
2024-05-21 11:56:03 +00:00
zyy17
47785756e5 fix: move log_version() into build() of App to fix no log version on bootstrap (#4004) 2024-05-21 09:15:15 +00:00
Jeremyhi
0aa523cd8c feat: make create view procedure simple as others (#4001) 2024-05-21 08:30:57 +00:00
Weny Xu
7a8222dd97 fix: try to fix broken CI (#3998)
* fix: try to fix broken CI

* chore: using loop to check status
2024-05-21 07:02:18 +00:00
maco
40c585890a refactor: replace Expr with datafusion::Expr (#3995)
* refactor: replace Expr with datafusion::Expr

* fix: fmt-toml

* fix: cr comment
2024-05-21 06:40:29 +00:00
zyy17
da925e956e ci: change the image name of nightly build (#3994) 2024-05-21 06:11:12 +00:00
Weny Xu
d7ade3c854 chore(ci): add fuzz tests for distributed mode (#3967)
* chore(ci): add cfg for setup GreptimeDB cluster

* chore: use kind

* chore: always print info

* chore: add debug print

* chore: set etcd replica to 1

* ci: refactor e2e cfg

* ci: add Fuzz Test for distributed mode

* Apply suggestions from code review

* chore: apply suggestions from CR

* chore(ci): upload logs
2024-05-21 04:58:42 +00:00
Yingwen
179c8c716c feat: Adds RegionScanner trait (#3948)
* feat: define region scanner

* feat: single partition scanner

* feat: use single partition scanner

* feat: implement ExecutionPlan wip

* feat: mito engine returns single partition scanner

* feat: implement DisplayAs for region server

* feat: dummy table provider use handle_partitioned_query()

* test: update sqlness test

* feat: table provider use ReadFromRegion

* refactor: remove StreamScanAdapter

* chore: update lock

* style: fix clippy

* refactor: remove handle_query from the RegionEngine trait

* chore: address CR comments

* refactor: rename methods

* refactor: rename ReadFromRegion to RegionScanExec
2024-05-20 11:52:00 +00:00
shuiyisong
19543f9819 feat: support compression on gRPC server (#3961)
* feat: enable gzip in grpc server side

* feat: add enable_gzip_compression config

* test: add grpc compression test

* feat: support user configured compression on grpc server

* chore: update doc

* chore: add tests

* fix: make config-docs

* chore: fix cr issue

* chore: add test

* refactor: remove config on server side, auto enable all compression support

* chore: minor update

* chore: remove unused code

* refactor: enable zstd compression internally by default

* chore: minor fix
2024-05-20 11:28:00 +00:00
discord9
533ada70ca chore: remove a forgotten dbg! (#3990)
* chore: remove a forgotten dbg!

* remove other dbg! and add lint

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix pyo3 feature

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-20 08:34:47 +00:00
zyy17
c50ff23194 ci: add 'contents: write' permission (#3989) 2024-05-20 06:23:54 +00:00
tison
d7f1150098 ci: fixup strings in check ci status (#3987)
Signed-off-by: tison <wander4096@gmail.com>
2024-05-20 03:59:05 +00:00
zyy17
82c3eca25e refactor: make the command entry cleaner (#3981)
* refactor: move run() in App trait

* refactor: introduce AppBuilder trait

* chore: remove AppBuilder

* refactor: remove Options struct and make the start() clean

* refactor: init once for common_telemetry::init_global_logging
2024-05-20 03:34:06 +00:00
Yingwen
df13832a59 feat: use cache in compaction (#3982) 2024-05-20 02:36:51 +00:00
tison
7da92eb9eb ci: check-status for nightly-ci (#3984)
Signed-off-by: tison <wander4096@gmail.com>
2024-05-19 07:10:59 +00:00
Weny Xu
c71298d3d5 chore: pin cargo-ndk to 3.5.4 (#3979) 2024-05-18 08:46:01 +00:00
Yingwen
de594833ac docs: add v0.8.0 TSBS report (#3983)
docs: add v0.8.0 tsbs report
2024-05-18 08:09:16 +00:00
Eugene Tolbakov
6a9a92931d chore: change binary array type from LargeBinaryArray to BinaryArray (#3924)
* chore: change binary array type from LargeBinaryArray to BinaryArray

* fix: adjust try_into_vector logic

* fix: apply CR suggestions, add tests

* chore: fix failing test

* chore: fix integration test

* chore: adjust the assertions according to changed implementation

* chore: add a test with LargeBinary type

* chore: apply CR suggestions

* chore: simplify tests
2024-05-18 08:04:41 +00:00
tison
11ad5b3ed1 ci: report CI failures with creating issues (#3976)
* ci: report CI failures with creating issues

Signed-off-by: tison <wander4096@gmail.com>

* integrate with CI workflows

Signed-off-by: tison <wander4096@gmail.com>

* mention db-approver

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-05-18 03:03:56 +00:00
zyy17
b8354bbb55 docs: add toc for config docs (#3974) 2024-05-18 01:57:49 +00:00
240 changed files with 6171 additions and 2537 deletions


@@ -0,0 +1,18 @@
name: Build and push CI Docker image
description: Build and push CI Docker image to local registry
inputs:
binary_path:
default: "./bin"
description: "Binary path"
runs:
using: composite
steps:
- name: Build and push to local registry
uses: docker/build-push-action@v5
with:
context: .
file: ./docker/ci/ubuntu/Dockerfile.fuzztests
push: true
tags: localhost:5001/greptime/greptimedb:latest
build-args: |
BINARY_PATH=${{ inputs.binary_path }}


@@ -0,0 +1,25 @@
name: Setup Etcd cluster
description: Deploy Etcd cluster on Kubernetes
inputs:
etcd-replicas:
default: 3
description: "Etcd replicas"
namespace:
default: "etcd-cluster"
runs:
using: composite
steps:
- name: Install Etcd cluster
shell: bash
run: |
helm upgrade \
--install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
--set replicaCount=${{ inputs.etcd-replicas }} \
--set resources.requests.cpu=50m \
--set resources.requests.memory=128Mi \
--set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \
--set persistence.size=2Gi \
--create-namespace \
-n ${{ inputs.namespace }}
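For local debugging, a quick way to confirm the Etcd pods came up (this mirrors the wait step the fuzz workflow uses further below; etcd-cluster is the action's default namespace):

# Block until the etcd pods report Ready, then list them.
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=etcd \
--timeout=120s \
-n etcd-cluster
kubectl get pods -n etcd-cluster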


@@ -0,0 +1,85 @@
name: Setup GreptimeDB cluster
description: Deploy GreptimeDB cluster on Kubernetes
inputs:
frontend-replicas:
default: 2
description: "Number of Frontend replicas"
datanode-replicas:
default: 2
description: "Number of Datanode replicas"
meta-replicas:
default: 3
description: "Number of Metasrv replicas"
image-registry:
default: "docker.io"
description: "Image registry"
image-repository:
default: "greptime/greptimedb"
description: "Image repository"
image-tag:
default: "latest"
description: 'Image tag'
etcd-endpoints:
default: "etcd.etcd-cluster.svc.cluster.local:2379"
description: "Etcd endpoints"
runs:
using: composite
steps:
- name: Install GreptimeDB operator
shell: bash
run: |
helm repo add greptime https://greptimeteam.github.io/helm-charts/
helm repo update
helm upgrade \
--install \
--create-namespace \
greptimedb-operator greptime/greptimedb-operator \
-n greptimedb-admin \
--wait \
--wait-for-jobs
- name: Install GreptimeDB cluster
shell: bash
run: |
helm upgrade \
--install my-greptimedb \
--set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
--set image.registry=${{ inputs.image-registry }} \
--set image.repository=${{ inputs.image-repository }} \
--set image.tag=${{ inputs.image-tag }} \
--set base.podTemplate.main.resources.requests.cpu=50m \
--set base.podTemplate.main.resources.requests.memory=256Mi \
--set base.podTemplate.main.resources.limits.cpu=1000m \
--set base.podTemplate.main.resources.limits.memory=2Gi \
--set frontend.replicas=${{ inputs.frontend-replicas }} \
--set datanode.replicas=${{ inputs.datanode-replicas }} \
--set meta.replicas=${{ inputs.meta-replicas }} \
greptime/greptimedb-cluster \
--create-namespace \
-n my-greptimedb \
--wait \
--wait-for-jobs
- name: Wait for GreptimeDB
shell: bash
run: |
while true; do
PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
if [ "$PHASE" == "Running" ]; then
echo "Cluster is ready"
break
else
echo "Cluster is not ready yet: Current phase: $PHASE"
kubectl get pods -n my-greptimedb
sleep 5 # wait for 5 seconds before checking again.
fi
done
- name: Print GreptimeDB info
if: always()
shell: bash
run: |
kubectl get all --show-labels -n my-greptimedb
- name: Describe Nodes
if: always()
shell: bash
run: |
kubectl describe nodes
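A minimal sketch of reaching the deployed frontend from the host, assuming a stock mysql client is installed (the service name and MySQL port 4002 match the port-forward step in the fuzz workflow below):

# Forward the frontend's MySQL port to the host, then connect with a MySQL client.
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb &
mysql -h 127.0.0.1 -P 4002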

.github/actions/setup-kind/action.yml vendored Normal file

@@ -0,0 +1,10 @@
name: Setup Kind
description: Deploy Kind
runs:
using: composite
steps:
- uses: actions/checkout@v4
- name: Create kind cluster
shell: bash
run: |
./.github/scripts/kind-with-registry.sh


@@ -57,3 +57,14 @@ runs:
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
run: |
aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive
- name: Export kind logs
if: failure()
shell: bash
run: kind export logs -n greptimedb-operator-e2e /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: kind-logs
path: /tmp/kind
retention-days: 3

.github/scripts/kind-with-registry.sh vendored Executable file

@@ -0,0 +1,66 @@
#!/usr/bin/env bash
set -e
set -o pipefail
# 1. Create registry container unless it already exists
reg_name='kind-registry'
reg_port='5001'
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
docker run \
-d --restart=always -p "127.0.0.1:${reg_port}:5000" --network bridge --name "${reg_name}" \
registry:2
fi
# 2. Create kind cluster with containerd registry config dir enabled
# TODO: kind will eventually enable this by default and this patch will
# be unnecessary.
#
# See:
# https://github.com/kubernetes-sigs/kind/issues/2875
# https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
# See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
cat <<EOF | kind create cluster --wait 2m --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
EOF
# 3. Add the registry config to the nodes
#
# This is necessary because localhost resolves to loopback addresses that are
# network-namespace local.
# In other words: localhost in the container is not localhost on the host.
#
# We want a consistent name that works from both ends, so we tell containerd to
# alias localhost:${reg_port} to the registry container when pulling images
REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
for node in $(kind get nodes); do
docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
[host."http://${reg_name}:5000"]
EOF
done
# 4. Connect the registry to the cluster network if not already connected
# This allows kind to bootstrap the network but ensures they're on the same network
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
docker network connect "kind" "${reg_name}"
fi
# 5. Document the local registry
# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: local-registry-hosting
namespace: kube-public
data:
localRegistryHosting.v1: |
host: "localhost:${reg_port}"
help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF
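Pushing a locally built image into this registry is what the build-and-push-ci-image action above automates; done by hand it would look roughly like the following (Dockerfile path, tag, and the BINARY_PATH default are taken from that action; the manual invocation itself is only illustrative):

# Build the CI image against the pre-built binary and push it to the kind-attached registry.
docker build \
-f ./docker/ci/ubuntu/Dockerfile.fuzztests \
--build-arg BINARY_PATH=./bin \
-t localhost:5001/greptime/greptimedb:latest .
docker push localhost:5001/greptime/greptimedb:latest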


@@ -82,6 +82,9 @@ env:
# The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb
permissions:
issues: write
jobs:
allocate-runners:
name: Allocate runners
@@ -321,7 +324,7 @@ jobs:
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification:
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub
@@ -330,16 +333,25 @@ jobs:
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- name: Notifiy dev build successful result
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
- name: Notify dev build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
- name: Notifiy dev build failed result
- name: Notify dev build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}


@@ -231,6 +231,130 @@ jobs:
path: /tmp/unstable-greptime/
retention-days: 3
build-greptime-ci:
name: Build GreptimeDB binary (profile-CI)
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "build-greptime-ci"
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin
- name: Build greptime binary
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo build --bin greptime --profile ci
- name: Pack greptime binary
shell: bash
run: |
mkdir bin && \
mv ./target/ci/greptime bin
- name: Print greptime binaries info
run: ls -lh bin
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: bin
version: current
distributed-fuzztest:
name: Fuzz Test (Distributed, Disk)
runs-on: ubuntu-latest
needs: build-greptime-ci
strategy:
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
steps:
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind
- name: Setup Etcd cluster
uses: ./.github/actions/setup-etcd-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz
# Downloads ci image
- name: Download pre-built binary
uses: actions/download-artifact@v4
with:
name: bin
path: .
- name: Unzip binary
run: tar -xvf ./bin.tar.gz
- name: Build and push GreptimeDB image
uses: ./.github/actions/build-and-push-ci-image
- name: Wait for etcd
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=etcd \
--timeout=120s \
-n etcd-cluster
- name: Print etcd info
shell: bash
run: kubectl get all --show-labels -n etcd-cluster
# Setup cluster for test
- name: Setup GreptimeDB cluster
uses: ./.github/actions/setup-greptimedb-cluster
with:
image-registry: localhost:5001
- name: Port forward (mysql)
run: |
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
max-total-time: 120
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: fuzz-tests-kind-logs-${{ matrix.target }}
path: /tmp/kind
retention-days: 3
sqlness:
name: Sqlness Test
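For reference, running one of the listed fuzz targets by hand against the port-forwarded cluster would presumably look like this (GT_MYSQL_ADDR matches the env above, and libFuzzer's -max_total_time flag corresponds to the action's max-total-time input):

# Run a single fuzz target for 120 seconds against the forwarded MySQL endpoint.
export GT_MYSQL_ADDR=127.0.0.1:4002
cargo +nightly fuzz run fuzz_create_table -- -max_total_time=120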


@@ -66,6 +66,13 @@ env:
NIGHTLY_RELEASE_PREFIX: nightly
# Use the different image name to avoid conflict with the release images.
# The DockerHub image will be greptime/greptimedb-nightly.
IMAGE_NAME: greptimedb-nightly
permissions:
issues: write
jobs:
allocate-runners:
name: Allocate runners
@@ -188,6 +195,7 @@ jobs:
with:
image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }}
@@ -220,7 +228,7 @@ jobs:
with:
src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: greptimedb
src-image-name: ${{ env.IMAGE_NAME }}
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -285,7 +293,7 @@ jobs:
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification:
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub
@@ -294,16 +302,25 @@ jobs:
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- name: Notifiy nightly build successful result
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
- name: Notify nightly build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
- name: Notifiy nightly build failed result
- name: Notify nightly build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}


@@ -12,6 +12,9 @@ concurrency:
env:
RUST_TOOLCHAIN: nightly-2024-04-20
permissions:
issues: write
jobs:
sqlness-test:
name: Run sqlness test
@@ -22,7 +25,6 @@ jobs:
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Run sqlness test
uses: ./.github/actions/sqlness-test
with:
@@ -39,6 +41,7 @@ jobs:
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -49,14 +52,6 @@ jobs:
uses: Swatinem/rust-cache@v2
- name: Run sqlness
run: cargo sqlness
- name: Notify slack if failed
if: failure()
uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
with:
payload: |
{"text": "Nightly CI failed for sqlness tests"}
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
@@ -73,6 +68,7 @@ jobs:
steps:
- run: git config --global core.autocrlf false
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -110,11 +106,53 @@ jobs:
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs"
- name: Notify slack if failed
if: failure()
uses: slackapi/slack-github-action@v1.23.0
check-status:
name: Check status
needs: [
sqlness-test,
sqlness-windows,
test-on-windows,
]
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-20.04
outputs:
check-result: ${{ steps.set-check-result.outputs.check-result }}
steps:
- name: Set check result
id: set-check-result
run: |
echo "check-result=success" >> $GITHUB_OUTPUT
notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
check-status
]
runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.check-status.outputs.check-result == 'success' }}
- name: Notify dev build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.check-status.outputs.check-result == 'success' }}
with:
payload: |
{"text": "Nightly CI failed for cargo test"}
{"text": "Nightly CI has completed successfully."}
- name: Notify dev build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.check-status.outputs.check-result != 'success' }}
with:
payload: |
{"text": "Nightly CI failed has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}


@@ -93,6 +93,11 @@ env:
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.9.0
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.
jobs:
allocate-runners:
name: Allocate runners
@@ -245,7 +250,7 @@ jobs:
- name: Set build macos result
id: set-build-macos-result
run: |
echo "build-macos-result=success" >> $GITHUB_OUTPUT
echo "build-macos-result=success" >> $GITHUB_OUTPUT
build-windows-artifacts:
name: Build Windows artifacts
@@ -318,7 +323,7 @@ jobs:
- name: Set build image result
id: set-build-image-result
run: |
echo "build-image-result=success" >> $GITHUB_OUTPUT
echo "build-image-result=success" >> $GITHUB_OUTPUT
release-cn-artifacts:
name: Release artifacts to CN region
@@ -436,7 +441,7 @@ jobs:
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification:
if: ${{ always() || github.repository == 'GreptimeTeam/greptimedb' }}
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub,
@@ -447,16 +452,25 @@ jobs:
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- name: Notifiy release successful result
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
- name: Notify release successful result
uses: slackapi/slack-github-action@v1.25.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB's release version has completed successfully."}
- name: Notifiy release failed result
- name: Notify release failed result
uses: slackapi/slack-github-action@v1.25.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result != 'success' || needs.build-windows-artifacts.outputs.build-windows-result != 'success' || needs.build-macos-artifacts.outputs.build-macos-result != 'success' }}
with:
payload: |
{"text": "GreptimeDB's release version has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/release.yml'."}
{"text": "GreptimeDB's release version has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}

Cargo.lock generated

@@ -35,7 +35,7 @@ version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"cipher",
"cpufeatures",
]
@@ -57,7 +57,7 @@ version = "0.8.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"const-random",
"getrandom",
"once_cell",
@@ -82,7 +82,7 @@ checksum = "befdff0b4683a0824fc8719ce639a252d9d62cd89c8d0004c39e2417128c1eb8"
dependencies = [
"axum",
"bytes",
"cfg-if 1.0.0",
"cfg-if",
"http",
"indexmap 1.9.3",
"schemars",
@@ -834,7 +834,7 @@ checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d"
dependencies = [
"addr2line",
"cc",
"cfg-if 1.0.0",
"cfg-if",
"libc",
"miniz_oxide",
"object",
@@ -1009,7 +1009,7 @@ dependencies = [
"arrayref",
"arrayvec",
"cc",
"cfg-if 1.0.0",
"cfg-if",
"constant_time_eq",
]
@@ -1339,12 +1339,6 @@ dependencies = [
"nom",
]
[[package]]
name = "cfg-if"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
[[package]]
name = "cfg-if"
version = "1.0.0"
@@ -1359,11 +1353,11 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
[[package]]
name = "cfgrammar"
version = "0.12.0"
version = "0.13.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf74ea341ae8905eac9a234b6a5a845e118c25bbbdecf85ec77431a8b3bfa0be"
checksum = "163348850b1cd34fa99ef1592b5d598ea7e6752f18aff2125b67537e887edb36"
dependencies = [
"indexmap 1.9.3",
"indexmap 2.2.6",
"lazy_static",
"num-traits",
"regex",
@@ -1662,6 +1656,7 @@ dependencies = [
"tikv-jemallocator",
"tokio",
"toml 0.8.12",
"tracing-appender",
]
[[package]]
@@ -2308,7 +2303,7 @@ version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e8227005286ec39567949b33df9896bcadfa6051bccca2488129f108ca23119"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
]
[[package]]
@@ -2350,7 +2345,7 @@ version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
]
[[package]]
@@ -2580,7 +2575,7 @@ version = "5.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"hashbrown 0.14.5",
"lock_api",
"once_cell",
@@ -3424,7 +3419,7 @@ version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"dirs-sys-next",
]
@@ -3471,7 +3466,7 @@ version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53ecafc952c4528d9b51a458d1a8904b81783feff9fde08ab6ed2545ff396872"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"libc",
"socket2 0.4.10",
"winapi",
@@ -3534,7 +3529,7 @@ version = "0.8.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
]
[[package]]
@@ -3555,26 +3550,6 @@ dependencies = [
"syn 2.0.61",
]
[[package]]
name = "enum-iterator"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fd242f399be1da0a5354aa462d57b4ab2b4ee0683cc552f7c007d2d12d36e94"
dependencies = [
"enum-iterator-derive",
]
[[package]]
name = "enum-iterator-derive"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1ab991c1362ac86c61ab6f556cff143daa22e5a15e4e189df818b2fd19fe65b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.61",
]
[[package]]
name = "enum_dispatch"
version = "0.3.13"
@@ -3716,7 +3691,7 @@ version = "3.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef033ed5e9bad94e55838ca0ca906db0e043f517adda0c8b79c7a8c66c93c1b5"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"rustix",
"windows-sys 0.48.0",
]
@@ -3757,7 +3732,7 @@ version = "0.2.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"libc",
"redox_syscall 0.4.1",
"windows-sys 0.52.0",
@@ -4185,25 +4160,13 @@ version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"js-sys",
"libc",
"wasi",
"wasm-bindgen",
]
[[package]]
name = "getset"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e45727250e75cc04ff2846a66397da8ef2b3db8e40e0cef4df67950a07621eb9"
dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "gimli"
version = "0.28.1"
@@ -4232,7 +4195,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=a11db14b8502f55ca5348917fd18e6fcf140f55e#a11db14b8502f55ca5348917fd18e6fcf140f55e"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=902f75fdd170c572e90b1f640161d90995f20218#902f75fdd170c572e90b1f640161d90995f20218"
dependencies = [
"prost 0.12.4",
"serde",
@@ -4274,7 +4237,7 @@ version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"crunchy",
"num-traits",
]
@@ -4853,7 +4816,7 @@ version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"js-sys",
"wasm-bindgen",
"web-sys",
@@ -4954,7 +4917,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97"
dependencies = [
"cesu8",
"cfg-if 1.0.0",
"cfg-if",
"combine",
"jni-sys",
"log",
@@ -5206,7 +5169,7 @@ version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"winapi",
]
@@ -5216,7 +5179,7 @@ version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"windows-targets 0.52.5",
]
@@ -5324,31 +5287,33 @@ dependencies = [
[[package]]
name = "lrlex"
version = "0.12.0"
version = "0.13.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22b832738fbfa58ad036580929e973b3b6bd31c6d6c7f18f6b5ea7b626675c85"
checksum = "77ff18e1bd3ed77d7bc2800a0f8b0e922a3c7ba525505be8bab9cf45dfc4984b"
dependencies = [
"cfgrammar",
"getopts",
"lazy_static",
"lrpar",
"num-traits",
"quote",
"regex",
"regex-syntax 0.8.3",
"serde",
"try_from",
"vergen",
]
[[package]]
name = "lrpar"
version = "0.12.0"
version = "0.13.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f270b952b07995fe874b10a5ed7dd28c80aa2130e37a7de7ed667d034e0a521"
checksum = "efea5a41b9988b5ae41ea9b2375a52cfa0e483f0210357209caa8d361a24a368"
dependencies = [
"bincode",
"cactus",
"cfgrammar",
"filetime",
"indexmap 1.9.3",
"indexmap 2.2.6",
"lazy_static",
"lrtable",
"num-traits",
@@ -5362,16 +5327,15 @@ dependencies = [
[[package]]
name = "lrtable"
version = "0.12.0"
version = "0.13.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a854115c6a10772ac154261592b082436abc869c812575cadcf9d7ceda8eff0b"
checksum = "ff5668c3bfd279ed24d5b0d24568c48dc993f9beabd51f74d1865a78c1d206ab"
dependencies = [
"cfgrammar",
"fnv",
"num-traits",
"serde",
"sparsevec",
"static_assertions",
"vob",
]
@@ -5511,7 +5475,7 @@ version = "0.10.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"digest",
]
@@ -5836,7 +5800,7 @@ version = "0.11.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"downcast",
"fragile",
"lazy_static",
@@ -5851,7 +5815,7 @@ version = "0.11.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"proc-macro2",
"quote",
"syn 1.0.109",
@@ -6118,7 +6082,7 @@ checksum = "f346ff70e7dbfd675fe90590b92d59ef2de15a8779ae305ebcbfd3f0caf59be4"
dependencies = [
"autocfg",
"bitflags 1.3.2",
"cfg-if 1.0.0",
"cfg-if",
"libc",
"memoffset 0.6.5",
"pin-utils",
@@ -6131,7 +6095,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b"
dependencies = [
"bitflags 1.3.2",
"cfg-if 1.0.0",
"cfg-if",
"libc",
"memoffset 0.7.1",
"pin-utils",
@@ -6144,7 +6108,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4"
dependencies = [
"bitflags 2.5.0",
"cfg-if 1.0.0",
"cfg-if",
"cfg_aliases",
"libc",
"memoffset 0.9.1",
@@ -6353,6 +6317,15 @@ dependencies = [
"syn 1.0.109",
]
[[package]]
name = "num_threads"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9"
dependencies = [
"libc",
]
[[package]]
name = "number_prefix"
version = "0.4.0"
@@ -6849,7 +6822,7 @@ version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"instant",
"libc",
"redox_syscall 0.2.16",
@@ -6864,7 +6837,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
dependencies = [
"backtrace",
"cfg-if 1.0.0",
"cfg-if",
"libc",
"petgraph",
"redox_syscall 0.5.1",
@@ -7336,7 +7309,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef5c97c51bd34c7e742402e216abdeb44d415fbe6ae41d56b114723e953711cb"
dependencies = [
"backtrace",
"cfg-if 1.0.0",
"cfg-if",
"findshlibs",
"inferno",
"libc",
@@ -7518,7 +7491,7 @@ version = "0.13.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"fnv",
"lazy_static",
"libc",
@@ -7576,9 +7549,9 @@ dependencies = [
[[package]]
name = "promql-parser"
version = "0.1.4"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a96ad54e4f5d76ea28a131b1a09aaa6e58708eae98510f63ccec4ce7514bf30d"
checksum = "007a331efb31f6ddb644590ef22359c9469784931162aad92599e34bcfa66583"
dependencies = [
"cfgrammar",
"lazy_static",
@@ -7793,7 +7766,7 @@ version = "0.20.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53bdbb96d49157e65d45cc287af5f32ffadd5f4761438b527b055fb0d4bb8233"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"indoc",
"libc",
"memoffset 0.9.1",
@@ -8362,7 +8335,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d"
dependencies = [
"cc",
"cfg-if 1.0.0",
"cfg-if",
"getrandom",
"libc",
"spin 0.9.8",
@@ -8493,7 +8466,7 @@ version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "290ca1a1c8ca7edb7c3283bd44dc35dd54fdec6253a3912e201ba1072018fca8"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"proc-macro2",
"quote",
"rustc_version",
@@ -8553,7 +8526,7 @@ version = "0.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6d5f2436026b4f6e79dc829837d467cc7e9a55ee40e750d716713540715a2df"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"ordered-multimap 0.4.3",
]
@@ -8563,7 +8536,7 @@ version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e0698206bcb8882bf2a9ecb4c1e7785db57ff052297085a6efd4fe42302068a"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"ordered-multimap 0.7.3",
]
@@ -8796,7 +8769,7 @@ dependencies = [
"ascii",
"bitflags 1.3.2",
"bstr",
"cfg-if 1.0.0",
"cfg-if",
"hexf-parse",
"itertools 0.10.5",
"lexical-parse-float",
@@ -8917,7 +8890,7 @@ dependencies = [
"ascii",
"base64 0.13.1",
"blake2",
"cfg-if 1.0.0",
"cfg-if",
"crc32fast",
"crossbeam-utils",
"csv-core",
@@ -8985,7 +8958,7 @@ dependencies = [
"bitflags 1.3.2",
"bstr",
"caseless",
"cfg-if 1.0.0",
"cfg-if",
"chrono",
"crossbeam-utils",
"exitcode",
@@ -9058,7 +9031,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1e83c32c3f3c33b08496e0d1df9ea8c64d39adb8eb36a1ebb1440c690697aef"
dependencies = [
"bitflags 1.3.2",
"cfg-if 1.0.0",
"cfg-if",
"clipboard-win",
"dirs-next",
"fd-lock",
@@ -9081,7 +9054,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5dfc8644681285d1fb67a467fb3021bfea306b99b4146b166a1fe3ada965eece"
dependencies = [
"bitflags 1.3.2",
"cfg-if 1.0.0",
"cfg-if",
"clipboard-win",
"dirs-next",
"fd-lock",
@@ -9626,7 +9599,7 @@ version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"cpufeatures",
"digest",
]
@@ -9637,7 +9610,7 @@ version = "0.10.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"cpufeatures",
"digest",
]
@@ -9648,7 +9621,7 @@ version = "0.10.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"cpufeatures",
"digest",
]
@@ -9839,9 +9812,9 @@ dependencies = [
[[package]]
name = "sparsevec"
version = "0.1.4"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "928d1ef5df00aec8c5643c2ac37db4dd282763013c0fcc81efbb8e13db8dd8ec"
checksum = "35df5d2e580b29f3f7ec5b4ed49b0ab3acf7f3624122b3e823cafb9630f293b8"
dependencies = [
"num-traits",
"packedvec",
@@ -10169,6 +10142,8 @@ dependencies = [
"common-query",
"common-recordbatch",
"common-wal",
"datafusion-expr 37.0.0",
"datafusion-physical-plan 37.0.0",
"datatypes",
"derive_builder 0.12.0",
"futures",
@@ -10482,7 +10457,7 @@ version = "0.30.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "732ffa00f53e6b2af46208fba5718d9662a421049204e156328b66791ffa15ae"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"core-foundation-sys",
"libc",
"ntapi",
@@ -10535,6 +10510,7 @@ dependencies = [
"datatypes",
"derive_builder 0.12.0",
"futures",
"greptime-proto",
"humantime",
"humantime-serde",
"parquet",
@@ -10580,7 +10556,7 @@ version = "3.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"fastrand",
"rustix",
"windows-sys 0.52.0",
@@ -10785,7 +10761,7 @@ version = "1.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"once_cell",
]
@@ -10839,7 +10815,9 @@ checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"
dependencies = [
"deranged",
"itoa",
"libc",
"num-conv",
"num_threads",
"powerfmt",
"serde",
"time-core",
@@ -11196,6 +11174,7 @@ dependencies = [
"axum",
"base64 0.21.7",
"bytes",
"flate2",
"h2",
"http",
"http-body",
@@ -11213,6 +11192,7 @@ dependencies = [
"tower-layer",
"tower-service",
"tracing",
"zstd 0.12.4",
]
[[package]]
@@ -11556,22 +11536,13 @@ version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
[[package]]
name = "try_from"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "283d3b89e1368717881a9d51dad843cc435380d8109c9e47d38780a324698d8b"
dependencies = [
"cfg-if 0.1.10",
]
[[package]]
name = "twox-hash"
version = "1.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"rand",
"static_assertions",
]
@@ -12004,16 +11975,12 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
[[package]]
name = "vergen"
version = "7.5.1"
version = "8.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f21b881cd6636ece9735721cf03c1fe1e774fe258683d084bb2812ab67435749"
checksum = "e27d6bdd219887a9eadd19e1c34f32e47fa332301184935c6d9bca26f3cca525"
dependencies = [
"anyhow",
"cfg-if 1.0.0",
"enum-iterator",
"getset",
"rustversion",
"thiserror",
"time",
]
@@ -12077,7 +12044,7 @@ version = "0.2.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"wasm-bindgen-macro",
]
@@ -12102,7 +12069,7 @@ version = "0.4.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"js-sys",
"wasm-bindgen",
"web-sys",
@@ -12592,7 +12559,7 @@ version = "0.50.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"windows-sys 0.48.0",
]


@@ -71,6 +71,7 @@ license = "Apache-2.0"
[workspace.lints]
clippy.print_stdout = "warn"
clippy.print_stderr = "warn"
clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn"
clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
@@ -109,6 +110,7 @@ datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev
datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-plan = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
derive_builder = "0.12"
@@ -118,7 +120,7 @@ etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a11db14b8502f55ca5348917fd18e6fcf140f55e" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "902f75fdd170c572e90b1f640161d90995f20218" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
@@ -138,6 +140,7 @@ parquet = { version = "51.0.0", default-features = false, features = ["arrow", "
paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.4" }
prost = "0.12"
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8"
@@ -168,7 +171,7 @@ tokio = { version = "1.36", features = ["full"] }
tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
tonic = { version = "0.11", features = ["tls"] }
tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13"
@@ -246,6 +249,11 @@ lto = "thin"
debug = false
incremental = false
[profile.ci]
inherits = "dev"
debug = false
strip = true
[profile.dev.package.sqlness-runner]
debug = false
strip = true


@@ -199,7 +199,7 @@ config-docs: ## Generate configuration documentation from toml files.
docker run --rm \
-v ${PWD}:/greptimedb \
-w /greptimedb/config \
toml2docs/toml2docs:latest \
toml2docs/toml2docs:v0.1.1 \
-p '##' \
-t ./config-docs-template.md \
-o ./config.md
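Regenerating the docs with the newly pinned image goes through the existing target:

# Rebuild config/config.md from the example TOML files via toml2docs:v0.1.1.
make config-docs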


@@ -1,10 +1,16 @@
# Configurations
- [Standalone Mode](#standalone-mode)
- [Distributed Mode](#distributed-mode)
- [Frontend](#frontend)
- [Metasrv](#metasrv)
- [Datanode](#datanode)
## Standalone Mode
{{ toml2docs "./standalone.example.toml" }}
## Cluster Mode
## Distributed Mode
### Frontend


@@ -1,5 +1,11 @@
# Configurations
- [Standalone Mode](#standalone-mode)
- [Distributed Mode](#distributed-mode)
- [Frontend](#frontend)
- [Metasrv](#metasrv)
- [Datanode](#datanode)
## Standalone Mode
| Key | Type | Default | Descriptions |
@@ -14,6 +20,11 @@
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
| `grpc.tls.key_path` | String | `None` | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
@@ -27,7 +38,7 @@
| `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql_options.tls` section. |
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
| `postgres.tls.key_path` | String | `None` | Private key file path. |
@@ -96,6 +107,10 @@
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
@@ -131,7 +146,7 @@
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
## Cluster Mode
## Distributed Mode
### Frontend
@@ -149,6 +164,11 @@
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
| `grpc.tls.key_path` | String | `None` | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload automatically.<br/>For now, the gRPC TLS config does not support auto reload. |
| `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
@@ -162,7 +182,7 @@
| `postgres.enable` | Bool | `true` | Whether to enable. |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgreSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql_options.tls` section. |
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
| `postgres.tls.key_path` | String | `None` | Private key file path. |
@@ -344,6 +364,10 @@
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/32 of OS memory, with a maximum of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: use the default value (1/4 of cpu cores).<br/>- `1`: scan in the current thread.<br/>- `n`: scan with parallelism `n`. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |

View File

@@ -324,6 +324,18 @@ vector_cache_size = "512MB"
## If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB.
page_cache_size = "512MB"
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}/write_cache`.
experimental_write_cache_path = ""
## Capacity for write cache.
experimental_write_cache_size = "512MB"
## TTL for write cache.
experimental_write_cache_ttl = "1h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"

View File

@@ -30,6 +30,23 @@ addr = "127.0.0.1:4001"
## The number of server worker threads.
runtime_size = 8
## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## +toml2docs:none-default
cert_path = ""
## Private key file path.
## +toml2docs:none-default
key_path = ""
## Watch for certificate and key file changes and reload automatically.
## For now, the gRPC TLS config does not support auto reload.
watch = false
## MySQL server options.
[mysql]
## Whether to enable.
@@ -70,7 +87,7 @@ addr = "127.0.0.1:4003"
## The number of server worker threads.
runtime_size = 2
## PostgreSQL server TLS options, see `mysql_options.tls` section.
## PostgreSQL server TLS options, see `mysql.tls` section.
[postgres.tls]
## TLS mode.
mode = "disable"

View File

@@ -25,6 +25,23 @@ addr = "127.0.0.1:4001"
## The number of server worker threads.
runtime_size = 8
## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## +toml2docs:none-default
cert_path = ""
## Private key file path.
## +toml2docs:none-default
key_path = ""
## Watch for certificate and key file changes and reload automatically.
## For now, the gRPC TLS config does not support auto reload.
watch = false
## MySQL server options.
[mysql]
## Whether to enable.
@@ -65,7 +82,7 @@ addr = "127.0.0.1:4003"
## The number of server worker threads.
runtime_size = 2
## PostgreSQL server TLS options, see `mysql_options.tls` section.
## PostgreSQL server TLS options, see `mysql.tls` section.
[postgres.tls]
## TLS mode.
mode = "disable"
@@ -367,6 +384,18 @@ vector_cache_size = "512MB"
## If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB.
page_cache_size = "512MB"
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}/write_cache`.
experimental_write_cache_path = ""
## Capacity for write cache.
experimental_write_cache_size = "512MB"
## TTL for write cache.
experimental_write_cache_ttl = "1h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"

View File

@@ -0,0 +1,83 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common"
import {context} from "@actions/github"
import _ from "lodash"
async function main() {
const success = process.env["CI_REPORT_STATUS"] === "true"
core.info(`CI_REPORT_STATUS=${process.env["CI_REPORT_STATUS"]}, resolved to ${success}`)
const client = obtainClient("GITHUB_TOKEN")
const title = `Workflow run '${context.workflow}' failed`
const url = `${process.env["GITHUB_SERVER_URL"]}/${process.env["GITHUB_REPOSITORY"]}/actions/runs/${process.env["GITHUB_RUN_ID"]}`
const failure_comment = `@GreptimeTeam/db-approver\nNew failure: ${url} `
const success_comment = `@GreptimeTeam/db-approver\nBack to success: ${url}`
const {owner, repo} = context.repo
const labels = ['O-ci-failure']
const issues = await client.paginate(client.rest.issues.listForRepo, {
owner,
repo,
labels: labels.join(','),
state: "open",
sort: "created",
direction: "desc",
});
const issue = _.find(issues, (i) => i.title === title);
if (issue) { // exist issue
core.info(`Found previous issue ${issue.html_url}`)
if (!success) {
await client.rest.issues.createComment({
owner,
repo,
issue_number: issue.number,
body: failure_comment,
})
} else {
await client.rest.issues.createComment({
owner,
repo,
issue_number: issue.number,
body: success_comment,
})
await client.rest.issues.update({
owner,
repo,
issue_number: issue.number,
state: "closed",
state_reason: "completed",
})
}
core.setOutput("html_url", issue.html_url)
} else if (!success) { // create new issue for failure
const issue = await client.rest.issues.create({
owner,
repo,
title,
labels,
body: failure_comment,
})
core.info(`Created issue ${issue.data.html_url}`)
core.setOutput("html_url", issue.data.html_url)
}
}
main().catch(handleError)
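For the issue automation above to work, the `GITHUB_TOKEN` handed to `obtainClient` presumably needs `issues: write` permission (for example via the workflow's `permissions` block); with a read-only token the `issues.create`, `createComment`, and `update` calls would be rejected.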

View File

@@ -0,0 +1,16 @@
FROM ubuntu:22.04
# The binary name of the GreptimeDB executable.
# Defaults to "greptime", but other projects may use a different name.
ARG TARGET_BIN=greptime
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
curl
ARG BINARY_PATH
ADD $BINARY_PATH/$TARGET_BIN /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENTRYPOINT ["greptime"]

View File

@@ -34,7 +34,7 @@ RUN rustup toolchain install ${RUST_TOOLCHAIN}
RUN rustup target add aarch64-linux-android
# Install cargo-ndk
RUN cargo install cargo-ndk
RUN cargo install cargo-ndk@3.5.4
ENV ANDROID_NDK_HOME $NDK_ROOT
# Builder entrypoint.

View File

@@ -0,0 +1,58 @@
# TSBS benchmark - v0.8.0
## Environment
### Local
| | |
| ------ | ---------------------------------- |
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB |
| Disk | SOLIDIGM SSDPFKNU010TZ |
| OS | Ubuntu 22.04.2 LTS |
### Amazon EC2
| | |
| ------- | -------------- |
| Machine | c5d.2xlarge |
| CPU | 8 core |
| Memory | 16GB |
| Disk | 50GB (GP3) |
| OS | Ubuntu 22.04.1 |
## Write performance
| Environment | Ingest rate (rows/s) |
| --------------- | -------------------- |
| Local | 315369.66 |
| EC2 c5d.2xlarge | 222148.56 |
## Query performance
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
| --------------------- | ---------- | -------------------- |
| cpu-max-all-1 | 24.63 | 15.29 |
| cpu-max-all-8 | 51.69 | 33.53 |
| double-groupby-1 | 673.51 | 1295.38 |
| double-groupby-5 | 1244.93 | 1993.91 |
| double-groupby-all | 2215.44 | 3056.77 |
| groupby-orderby-limit | 754.50 | 1546.49 |
| high-cpu-1 | 19.62 | 11.58 |
| high-cpu-all | 5402.31 | 8011.43 |
| lastpoint | 6756.12 | 9312.67 |
| single-groupby-1-1-1 | 15.70 | 7.67 |
| single-groupby-1-1-12 | 16.72 | 9.29 |
| single-groupby-1-8-1 | 26.72 | 17.97 |
| single-groupby-5-1-1 | 18.17 | 10.09 |
| single-groupby-5-1-12 | 20.04 | 12.37 |
| single-groupby-5-8-1 | 35.63 | 23.13 |
`single-groupby-1-1-1` query throughput
| Environment | Client concurrency | mean time (ms) | qps (queries/sec) |
| --------------- | ------------------ | -------------- | ----------------- |
| Local | 50 | 42.87 | 1165.73 |
| Local | 100 | 89.29 | 1119.38 |
| EC2 c5d.2xlarge | 50 | 69.25 | 721.73 |
| EC2 c5d.2xlarge | 100 | 140.93 | 709.35 |
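As a sanity check, the throughput figures follow qps ≈ concurrency / mean time: for the local run at concurrency 50, 50 / 0.04287 s ≈ 1166 queries/sec, in line with the measured 1165.73.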

View File

@@ -14,10 +14,9 @@
use arrow::array::StringArray;
use arrow::compute::kernels::comparison;
use common_query::logical_plan::DfExpr;
use datafusion::common::ScalarValue;
use datafusion::logical_expr::expr::Like;
use datafusion::logical_expr::Operator;
use datafusion::logical_expr::{Expr, Operator};
use datatypes::value::Value;
use store_api::storage::ScanRequest;
@@ -118,12 +117,12 @@ impl Predicate {
}
/// Tries to create a predicate from a datafusion [`Expr`]; returns `None` on failure.
fn from_expr(expr: DfExpr) -> Option<Predicate> {
fn from_expr(expr: Expr) -> Option<Predicate> {
match expr {
// NOT expr
DfExpr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
Expr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
// expr LIKE pattern
DfExpr::Like(Like {
Expr::Like(Like {
negated,
expr,
pattern,
@@ -131,10 +130,10 @@ impl Predicate {
..
}) if is_column(&expr) && is_string_literal(&pattern) => {
// Safety: ensured by guard
let DfExpr::Column(c) = *expr else {
let Expr::Column(c) = *expr else {
unreachable!();
};
let DfExpr::Literal(ScalarValue::Utf8(Some(pattern))) = *pattern else {
let Expr::Literal(ScalarValue::Utf8(Some(pattern))) = *pattern else {
unreachable!();
};
@@ -147,10 +146,10 @@ impl Predicate {
}
}
// left OP right
DfExpr::BinaryExpr(bin) => match (*bin.left, bin.op, *bin.right) {
Expr::BinaryExpr(bin) => match (*bin.left, bin.op, *bin.right) {
// left == right
(DfExpr::Literal(scalar), Operator::Eq, DfExpr::Column(c))
| (DfExpr::Column(c), Operator::Eq, DfExpr::Literal(scalar)) => {
(Expr::Literal(scalar), Operator::Eq, Expr::Column(c))
| (Expr::Column(c), Operator::Eq, Expr::Literal(scalar)) => {
let Ok(v) = Value::try_from(scalar) else {
return None;
};
@@ -158,8 +157,8 @@ impl Predicate {
Some(Predicate::Eq(c.name, v))
}
// left != right
(DfExpr::Literal(scalar), Operator::NotEq, DfExpr::Column(c))
| (DfExpr::Column(c), Operator::NotEq, DfExpr::Literal(scalar)) => {
(Expr::Literal(scalar), Operator::NotEq, Expr::Column(c))
| (Expr::Column(c), Operator::NotEq, Expr::Literal(scalar)) => {
let Ok(v) = Value::try_from(scalar) else {
return None;
};
@@ -183,14 +182,14 @@ impl Predicate {
_ => None,
},
// [NOT] IN (LIST)
DfExpr::InList(list) => {
Expr::InList(list) => {
match (*list.expr, list.list, list.negated) {
// column [NOT] IN (v1, v2, v3, ...)
(DfExpr::Column(c), list, negated) if is_all_scalars(&list) => {
(Expr::Column(c), list, negated) if is_all_scalars(&list) => {
let mut values = Vec::with_capacity(list.len());
for scalar in list {
// Safety: checked by `is_all_scalars`
let DfExpr::Literal(scalar) = scalar else {
let Expr::Literal(scalar) = scalar else {
unreachable!();
};
@@ -237,12 +236,12 @@ fn like_utf8(s: &str, pattern: &str, case_insensitive: &bool) -> Option<bool> {
Some(booleans.value(0))
}
fn is_string_literal(expr: &DfExpr) -> bool {
matches!(expr, DfExpr::Literal(ScalarValue::Utf8(Some(_))))
fn is_string_literal(expr: &Expr) -> bool {
matches!(expr, Expr::Literal(ScalarValue::Utf8(Some(_))))
}
fn is_column(expr: &DfExpr) -> bool {
matches!(expr, DfExpr::Column(_))
fn is_column(expr: &Expr) -> bool {
matches!(expr, Expr::Column(_))
}
/// A list of predicates
@@ -257,7 +256,7 @@ impl Predicates {
let mut predicates = Vec::with_capacity(request.filters.len());
for filter in &request.filters {
if let Some(predicate) = Predicate::from_expr(filter.df_expr().clone()) {
if let Some(predicate) = Predicate::from_expr(filter.clone()) {
predicates.push(predicate);
}
}
@@ -286,8 +285,8 @@ impl Predicates {
}
/// Returns true when the values are all [`DfExpr::Literal`].
fn is_all_scalars(list: &[DfExpr]) -> bool {
list.iter().all(|v| matches!(v, DfExpr::Literal(_)))
fn is_all_scalars(list: &[Expr]) -> bool {
list.iter().all(|v| matches!(v, Expr::Literal(_)))
}
#[cfg(test)]
@@ -376,7 +375,7 @@ mod tests {
#[test]
fn test_predicate_like() {
// case insensitive
let expr = DfExpr::Like(Like {
let expr = Expr::Like(Like {
negated: false,
expr: Box::new(column("a")),
pattern: Box::new(string_literal("%abc")),
@@ -403,7 +402,7 @@ mod tests {
assert!(p.eval(&[]).is_none());
// case sensitive
let expr = DfExpr::Like(Like {
let expr = Expr::Like(Like {
negated: false,
expr: Box::new(column("a")),
pattern: Box::new(string_literal("%abc")),
@@ -423,7 +422,7 @@ mod tests {
assert!(p.eval(&[]).is_none());
// not like
let expr = DfExpr::Like(Like {
let expr = Expr::Like(Like {
negated: true,
expr: Box::new(column("a")),
pattern: Box::new(string_literal("%abc")),
@@ -437,15 +436,15 @@ mod tests {
assert!(p.eval(&[]).is_none());
}
fn column(name: &str) -> DfExpr {
DfExpr::Column(Column {
fn column(name: &str) -> Expr {
Expr::Column(Column {
relation: None,
name: name.to_string(),
})
}
fn string_literal(v: &str) -> DfExpr {
DfExpr::Literal(ScalarValue::Utf8(Some(v.to_string())))
fn string_literal(v: &str) -> Expr {
Expr::Literal(ScalarValue::Utf8(Some(v.to_string())))
}
fn match_string_value(v: &Value, expected: &str) -> bool {
@@ -463,14 +462,14 @@ mod tests {
result
}
fn mock_exprs() -> (DfExpr, DfExpr) {
let expr1 = DfExpr::BinaryExpr(BinaryExpr {
fn mock_exprs() -> (Expr, Expr) {
let expr1 = Expr::BinaryExpr(BinaryExpr {
left: Box::new(column("a")),
op: Operator::Eq,
right: Box::new(string_literal("a_value")),
});
let expr2 = DfExpr::BinaryExpr(BinaryExpr {
let expr2 = Expr::BinaryExpr(BinaryExpr {
left: Box::new(column("b")),
op: Operator::NotEq,
right: Box::new(string_literal("b_value")),
@@ -491,17 +490,17 @@ mod tests {
assert!(matches!(&p2, Predicate::NotEq(column, v) if column == "b"
&& match_string_value(v, "b_value")));
let and_expr = DfExpr::BinaryExpr(BinaryExpr {
let and_expr = Expr::BinaryExpr(BinaryExpr {
left: Box::new(expr1.clone()),
op: Operator::And,
right: Box::new(expr2.clone()),
});
let or_expr = DfExpr::BinaryExpr(BinaryExpr {
let or_expr = Expr::BinaryExpr(BinaryExpr {
left: Box::new(expr1.clone()),
op: Operator::Or,
right: Box::new(expr2.clone()),
});
let not_expr = DfExpr::Not(Box::new(expr1.clone()));
let not_expr = Expr::Not(Box::new(expr1.clone()));
let and_p = Predicate::from_expr(and_expr).unwrap();
assert!(matches!(and_p, Predicate::And(left, right) if *left == p1 && *right == p2));
@@ -510,7 +509,7 @@ mod tests {
let not_p = Predicate::from_expr(not_expr).unwrap();
assert!(matches!(not_p, Predicate::Not(p) if *p == p1));
let inlist_expr = DfExpr::InList(InList {
let inlist_expr = Expr::InList(InList {
expr: Box::new(column("a")),
list: vec![string_literal("a1"), string_literal("a2")],
negated: false,
@@ -520,7 +519,7 @@ mod tests {
assert!(matches!(&inlist_p, Predicate::InList(c, values) if c == "a"
&& match_string_values(values, &["a1", "a2"])));
let inlist_expr = DfExpr::InList(InList {
let inlist_expr = Expr::InList(InList {
expr: Box::new(column("a")),
list: vec![string_literal("a1"), string_literal("a2")],
negated: true,
@@ -540,7 +539,7 @@ mod tests {
let (expr1, expr2) = mock_exprs();
let request = ScanRequest {
filters: vec![expr1.into(), expr2.into()],
filters: vec![expr1, expr2],
..Default::default()
};
let predicates = Predicates::from_scan_request(&Some(request));
@@ -578,7 +577,7 @@ mod tests {
let (expr1, expr2) = mock_exprs();
let request = ScanRequest {
filters: vec![expr1.into(), expr2.into()],
filters: vec![expr1, expr2],
..Default::default()
};
let predicates = Predicates::from_scan_request(&Some(request));

View File

@@ -19,9 +19,10 @@ use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
use api::v1::region::region_client::RegionClient as PbRegionClient;
use api::v1::HealthCheckRequest;
use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::ChannelManager;
use common_grpc::channel_manager::{ChannelConfig, ChannelManager, ClientTlsOption};
use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
use tonic::codec::CompressionEncoding;
use tonic::transport::Channel;
use crate::load_balance::{LoadBalance, Loadbalancer};
@@ -86,6 +87,17 @@ impl Client {
Self::with_manager_and_urls(ChannelManager::new(), urls)
}
pub fn with_tls_and_urls<U, A>(urls: A, client_tls: ClientTlsOption) -> Result<Self>
where
U: AsRef<str>,
A: AsRef<[U]>,
{
let channel_config = ChannelConfig::default().client_tls_config(client_tls);
let channel_manager = ChannelManager::with_tls_config(channel_config)
.context(error::CreateTlsChannelSnafu)?;
Ok(Self::with_manager_and_urls(channel_manager, urls))
}
pub fn with_manager_and_urls<U, A>(channel_manager: ChannelManager, urls: A) -> Self
where
U: AsRef<str>,
@@ -151,24 +163,34 @@ impl Client {
pub fn make_flight_client(&self) -> Result<FlightClient> {
let (addr, channel) = self.find_channel()?;
Ok(FlightClient {
addr,
client: FlightServiceClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size()),
})
let client = FlightServiceClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size())
.accept_compressed(CompressionEncoding::Zstd)
.send_compressed(CompressionEncoding::Zstd);
Ok(FlightClient { addr, client })
}
pub(crate) fn raw_region_client(&self) -> Result<PbRegionClient<Channel>> {
let (_, channel) = self.find_channel()?;
Ok(PbRegionClient::new(channel)
let client = PbRegionClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size()))
.max_encoding_message_size(self.max_grpc_send_message_size())
.accept_compressed(CompressionEncoding::Zstd)
.send_compressed(CompressionEncoding::Zstd);
Ok(client)
}
pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {
let (_, channel) = self.find_channel()?;
Ok(PrometheusGatewayClient::new(channel))
let client = PrometheusGatewayClient::new(channel)
.accept_compressed(CompressionEncoding::Gzip)
.accept_compressed(CompressionEncoding::Zstd)
.send_compressed(CompressionEncoding::Gzip)
.send_compressed(CompressionEncoding::Zstd);
Ok(client)
}
pub async fn health_check(&self) -> Result<()> {
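One caveat with the compression setup above: gRPC negotiates compression per call, so the peer must also support the chosen codec; a server without zstd (or gzip, for the Prometheus gateway client) support rejects such requests with `Unimplemented`. In tonic this also assumes the corresponding `gzip`/`zstd` cargo features are enabled.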

View File

@@ -82,6 +82,13 @@ pub enum Error {
source: common_grpc::error::Error,
},
#[snafu(display("Failed to create Tls channel manager"))]
CreateTlsChannel {
#[snafu(implicit)]
location: Location,
source: common_grpc::error::Error,
},
#[snafu(display("Failed to request RegionServer, code: {}", code))]
RegionServer {
code: Code,
@@ -129,9 +136,9 @@ impl ErrorExt for Error {
Error::FlightGet { source, .. }
| Error::HandleRequest { source, .. }
| Error::RegionServer { source, .. } => source.status_code(),
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source, .. } => {
source.status_code()
}
Error::CreateChannel { source, .. }
| Error::ConvertFlightData { source, .. }
| Error::CreateTlsChannel { source, .. } => source.status_code(),
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
}
}

View File

@@ -74,6 +74,7 @@ substrait.workspace = true
table.workspace = true
tokio.workspace = true
toml.workspace = true
tracing-appender = "0.2"
[target.'cfg(not(windows))'.dependencies]
tikv-jemallocator = "0.5"

View File

@@ -14,13 +14,11 @@
#![doc = include_str!("../../../../README.md")]
use std::fmt;
use clap::{Parser, Subcommand};
use cmd::error::Result;
use cmd::options::{GlobalOptions, Options};
use cmd::{cli, datanode, frontend, log_versions, metasrv, standalone, start_app, App};
use common_version::{short_version, version};
use cmd::options::GlobalOptions;
use cmd::{cli, datanode, frontend, metasrv, standalone, App};
use common_version::version;
#[derive(Parser)]
#[command(name = "greptime", author, version, long_version = version!(), about)]
@@ -56,58 +54,6 @@ enum SubCommand {
Cli(cli::Command),
}
impl SubCommand {
async fn build(self, opts: Options) -> Result<Box<dyn App>> {
let app: Box<dyn App> = match (self, opts) {
(SubCommand::Datanode(cmd), Options::Datanode(dn_opts)) => {
let app = cmd.build(*dn_opts).await?;
Box::new(app) as _
}
(SubCommand::Frontend(cmd), Options::Frontend(fe_opts)) => {
let app = cmd.build(*fe_opts).await?;
Box::new(app) as _
}
(SubCommand::Metasrv(cmd), Options::Metasrv(meta_opts)) => {
let app = cmd.build(*meta_opts).await?;
Box::new(app) as _
}
(SubCommand::Standalone(cmd), Options::Standalone(opts)) => {
let app = cmd.build(*opts).await?;
Box::new(app) as _
}
(SubCommand::Cli(cmd), Options::Cli(_)) => {
let app = cmd.build().await?;
Box::new(app) as _
}
_ => unreachable!(),
};
Ok(app)
}
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
match self {
SubCommand::Datanode(cmd) => cmd.load_options(global_options),
SubCommand::Frontend(cmd) => cmd.load_options(global_options),
SubCommand::Metasrv(cmd) => cmd.load_options(global_options),
SubCommand::Standalone(cmd) => cmd.load_options(global_options),
SubCommand::Cli(cmd) => cmd.load_options(global_options),
}
}
}
impl fmt::Display for SubCommand {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
SubCommand::Datanode(..) => write!(f, "greptime-datanode"),
SubCommand::Frontend(..) => write!(f, "greptime-frontend"),
SubCommand::Metasrv(..) => write!(f, "greptime-metasrv"),
SubCommand::Standalone(..) => write!(f, "greptime-standalone"),
SubCommand::Cli(_) => write!(f, "greptime-cli"),
}
}
}
#[cfg(not(windows))]
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
@@ -119,24 +65,38 @@ async fn main() -> Result<()> {
}
async fn start(cli: Command) -> Result<()> {
let subcmd = cli.subcmd;
let app_name = subcmd.to_string();
let opts = subcmd.load_options(&cli.global_options)?;
let _guard = common_telemetry::init_global_logging(
&app_name,
opts.logging_options(),
&cli.global_options.tracing_options(),
opts.node_id(),
);
log_versions(version!(), short_version!());
let app = subcmd.build(opts).await?;
start_app(app).await
match cli.subcmd {
SubCommand::Datanode(cmd) => {
cmd.build(cmd.load_options(&cli.global_options)?)
.await?
.run()
.await
}
SubCommand::Frontend(cmd) => {
cmd.build(cmd.load_options(&cli.global_options)?)
.await?
.run()
.await
}
SubCommand::Metasrv(cmd) => {
cmd.build(cmd.load_options(&cli.global_options)?)
.await?
.run()
.await
}
SubCommand::Standalone(cmd) => {
cmd.build(cmd.load_options(&cli.global_options)?)
.await?
.run()
.await
}
SubCommand::Cli(cmd) => {
cmd.build(cmd.load_options(&cli.global_options)?)
.await?
.run()
.await
}
}
}
fn setup_human_panic() {

View File

@@ -30,15 +30,18 @@ mod upgrade;
use async_trait::async_trait;
use bench::BenchTableMetadataCommand;
use clap::Parser;
use common_telemetry::logging::LoggingOptions;
use common_telemetry::logging::{LoggingOptions, TracingOptions};
use tracing_appender::non_blocking::WorkerGuard;
// pub use repl::Repl;
use upgrade::UpgradeCommand;
use self::export::ExportCommand;
use crate::error::Result;
use crate::options::{GlobalOptions, Options};
use crate::options::GlobalOptions;
use crate::App;
pub const APP_NAME: &str = "greptime-cli";
#[async_trait]
pub trait Tool: Send + Sync {
async fn do_work(&self) -> Result<()>;
@@ -46,18 +49,24 @@ pub trait Tool: Send + Sync {
pub struct Instance {
tool: Box<dyn Tool>,
// Keep the logging guard to prevent the worker from being dropped.
_guard: Vec<WorkerGuard>,
}
impl Instance {
fn new(tool: Box<dyn Tool>) -> Self {
Self { tool }
fn new(tool: Box<dyn Tool>, guard: Vec<WorkerGuard>) -> Self {
Self {
tool,
_guard: guard,
}
}
}
#[async_trait]
impl App for Instance {
fn name(&self) -> &str {
"greptime-cli"
APP_NAME
}
async fn start(&mut self) -> Result<()> {
@@ -80,11 +89,18 @@ pub struct Command {
}
impl Command {
pub async fn build(self) -> Result<Instance> {
self.cmd.build().await
pub async fn build(&self, opts: LoggingOptions) -> Result<Instance> {
let guard = common_telemetry::init_global_logging(
APP_NAME,
&opts,
&TracingOptions::default(),
None,
);
self.cmd.build(guard).await
}
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<LoggingOptions> {
let mut logging_opts = LoggingOptions::default();
if let Some(dir) = &global_options.log_dir {
@@ -93,7 +109,7 @@ impl Command {
logging_opts.level.clone_from(&global_options.log_level);
Ok(Options::Cli(Box::new(logging_opts)))
Ok(logging_opts)
}
}
@@ -106,12 +122,12 @@ enum SubCommand {
}
impl SubCommand {
async fn build(self) -> Result<Instance> {
async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
match self {
// SubCommand::Attach(cmd) => cmd.build().await,
SubCommand::Upgrade(cmd) => cmd.build().await,
SubCommand::Bench(cmd) => cmd.build().await,
SubCommand::Export(cmd) => cmd.build().await,
SubCommand::Upgrade(cmd) => cmd.build(guard).await,
SubCommand::Bench(cmd) => cmd.build(guard).await,
SubCommand::Export(cmd) => cmd.build(guard).await,
}
}
}
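The `_guard: Vec<WorkerGuard>` fields threaded through these commands exist because `tracing-appender`'s non-blocking writer flushes and shuts down as soon as its `WorkerGuard` is dropped. A minimal sketch of that pattern, with illustrative names, assuming `tracing-appender` 0.2 and `tracing-subscriber` as dependencies:

```rust
use tracing_appender::non_blocking::WorkerGuard;

struct Instance {
    // Dropping this guard stops the background log writer,
    // so it must live as long as the instance does.
    _guard: WorkerGuard,
}

fn init_logging() -> Instance {
    let file = std::fs::File::create("app.log").expect("create log file");
    // `non_blocking` returns the writer plus the guard that keeps the
    // worker thread alive; losing the guard silences further logs.
    let (writer, guard) = tracing_appender::non_blocking(file);
    tracing_subscriber::fmt().with_writer(writer).init();
    Instance { _guard: guard }
}
```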

View File

@@ -30,6 +30,7 @@ use datatypes::schema::{ColumnSchema, RawSchema};
use rand::Rng;
use store_api::storage::RegionNumber;
use table::metadata::{RawTableInfo, RawTableMeta, TableId, TableIdent, TableType};
use tracing_appender::non_blocking::WorkerGuard;
use self::metadata::TableMetadataBencher;
use crate::cli::{Instance, Tool};
@@ -61,7 +62,7 @@ pub struct BenchTableMetadataCommand {
}
impl BenchTableMetadataCommand {
pub async fn build(&self) -> Result<Instance> {
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr], 128)
.await
.unwrap();
@@ -72,7 +73,7 @@ impl BenchTableMetadataCommand {
table_metadata_manager,
count: self.count,
};
Ok(Instance::new(Box::new(tool)))
Ok(Instance::new(Box::new(tool), guard))
}
}

View File

@@ -30,6 +30,7 @@ use tokio::fs::File;
use tokio::io::{AsyncWriteExt, BufWriter};
use tokio::sync::Semaphore;
use tokio::time::Instant;
use tracing_appender::non_blocking::WorkerGuard;
use crate::cli::{Instance, Tool};
use crate::error::{
@@ -80,7 +81,7 @@ pub struct ExportCommand {
}
impl ExportCommand {
pub async fn build(&self) -> Result<Instance> {
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
let (catalog, schema) = split_database(&self.database)?;
let auth_header = if let Some(basic) = &self.auth_basic {
@@ -90,15 +91,18 @@ impl ExportCommand {
None
};
Ok(Instance::new(Box::new(Export {
addr: self.addr.clone(),
catalog,
schema,
output_dir: self.output_dir.clone(),
parallelism: self.export_jobs,
target: self.target.clone(),
auth_header,
})))
Ok(Instance::new(
Box::new(Export {
addr: self.addr.clone(),
catalog,
schema,
output_dir: self.output_dir.clone(),
parallelism: self.export_jobs,
target: self.target.clone(),
auth_header,
}),
guard,
))
}
}

View File

@@ -40,6 +40,7 @@ use etcd_client::Client;
use futures::TryStreamExt;
use prost::Message;
use snafu::ResultExt;
use tracing_appender::non_blocking::WorkerGuard;
use v1_helper::{CatalogKey as v1CatalogKey, SchemaKey as v1SchemaKey, TableGlobalValue};
use crate::cli::{Instance, Tool};
@@ -63,7 +64,7 @@ pub struct UpgradeCommand {
}
impl UpgradeCommand {
pub async fn build(&self) -> Result<Instance> {
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
let client = Client::connect([&self.etcd_addr], None)
.await
.context(ConnectEtcdSnafu {
@@ -77,7 +78,7 @@ impl UpgradeCommand {
skip_schema_keys: self.skip_schema_keys,
skip_table_route_keys: self.skip_table_route_keys,
};
Ok(Instance::new(Box::new(tool)))
Ok(Instance::new(Box::new(tool), guard))
}
}

View File

@@ -21,6 +21,7 @@ use clap::Parser;
use common_config::Configurable;
use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_version::{short_version, version};
use common_wal::config::DatanodeWalConfig;
use datanode::config::DatanodeOptions;
use datanode::datanode::{Datanode, DatanodeBuilder};
@@ -28,20 +29,29 @@ use datanode::service::DatanodeServiceBuilder;
use meta_client::MetaClientOptions;
use servers::Mode;
use snafu::{OptionExt, ResultExt};
use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{
LoadLayeredConfigSnafu, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu,
};
use crate::options::{GlobalOptions, Options};
use crate::App;
use crate::options::GlobalOptions;
use crate::{log_versions, App};
pub const APP_NAME: &str = "greptime-datanode";
pub struct Instance {
datanode: Datanode,
// Keep the logging guard to prevent the worker from being dropped.
_guard: Vec<WorkerGuard>,
}
impl Instance {
pub fn new(datanode: Datanode) -> Self {
Self { datanode }
pub fn new(datanode: Datanode, guard: Vec<WorkerGuard>) -> Self {
Self {
datanode,
_guard: guard,
}
}
pub fn datanode_mut(&mut self) -> &mut Datanode {
@@ -56,7 +66,7 @@ impl Instance {
#[async_trait]
impl App for Instance {
fn name(&self) -> &str {
"greptime-datanode"
APP_NAME
}
async fn start(&mut self) -> Result<()> {
@@ -82,11 +92,11 @@ pub struct Command {
}
impl Command {
pub async fn build(self, opts: DatanodeOptions) -> Result<Instance> {
pub async fn build(&self, opts: DatanodeOptions) -> Result<Instance> {
self.subcmd.build(opts).await
}
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
self.subcmd.load_options(global_options)
}
}
@@ -97,13 +107,13 @@ enum SubCommand {
}
impl SubCommand {
async fn build(self, opts: DatanodeOptions) -> Result<Instance> {
async fn build(&self, opts: DatanodeOptions) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.build(opts).await,
}
}
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
match self {
SubCommand::Start(cmd) => cmd.load_options(global_options),
}
@@ -135,17 +145,15 @@ struct StartCommand {
}
impl StartCommand {
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
Ok(Options::Datanode(Box::new(
self.merge_with_cli_options(
global_options,
DatanodeOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
)?,
)))
fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
self.merge_with_cli_options(
global_options,
DatanodeOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
)
}
// The precedence order is: cli > config file > environment variables > default values.
@@ -226,7 +234,15 @@ impl StartCommand {
Ok(opts)
}
async fn build(self, mut opts: DatanodeOptions) -> Result<Instance> {
async fn build(&self, mut opts: DatanodeOptions) -> Result<Instance> {
let guard = common_telemetry::init_global_logging(
APP_NAME,
&opts.logging,
&opts.tracing,
opts.node_id.map(|x| x.to_string()),
);
log_versions(version!(), short_version!());
let plugins = plugins::setup_datanode_plugins(&mut opts)
.await
.context(StartDatanodeSnafu)?;
@@ -265,7 +281,7 @@ impl StartCommand {
.context(StartDatanodeSnafu)?;
datanode.setup_services(services);
Ok(Instance::new(datanode))
Ok(Instance::new(datanode, guard))
}
}
@@ -337,10 +353,7 @@ mod tests {
..Default::default()
};
let Options::Datanode(options) = cmd.load_options(&GlobalOptions::default()).unwrap()
else {
unreachable!()
};
let options = cmd.load_options(&GlobalOptions::default()).unwrap();
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!(Some(42), options.node_id);
@@ -399,23 +412,19 @@ mod tests {
#[test]
fn test_try_from_cmd() {
if let Options::Datanode(opt) = StartCommand::default()
let opt = StartCommand::default()
.load_options(&GlobalOptions::default())
.unwrap()
{
assert_eq!(Mode::Standalone, opt.mode)
}
.unwrap();
assert_eq!(Mode::Standalone, opt.mode);
if let Options::Datanode(opt) = (StartCommand {
let opt = (StartCommand {
node_id: Some(42),
metasrv_addrs: Some(vec!["127.0.0.1:3002".to_string()]),
..Default::default()
})
.load_options(&GlobalOptions::default())
.unwrap()
{
assert_eq!(Mode::Distributed, opt.mode)
}
.unwrap();
assert_eq!(Mode::Distributed, opt.mode);
assert!((StartCommand {
metasrv_addrs: Some(vec!["127.0.0.1:3002".to_string()]),
@@ -447,7 +456,7 @@ mod tests {
})
.unwrap();
let logging_opt = options.logging_options();
let logging_opt = options.logging;
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
}
@@ -527,11 +536,7 @@ mod tests {
..Default::default()
};
let Options::Datanode(opts) =
command.load_options(&GlobalOptions::default()).unwrap()
else {
unreachable!()
};
let opts = command.load_options(&GlobalOptions::default()).unwrap();
// Should be read from env, env > default values.
let DatanodeWalConfig::RaftEngine(raft_engine_config) = opts.wal else {

View File

@@ -31,6 +31,7 @@ use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_time::timezone::set_default_timezone;
use common_version::{short_version, version};
use frontend::frontend::FrontendOptions;
use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
use frontend::heartbeat::HeartbeatTask;
@@ -41,20 +42,29 @@ use meta_client::MetaClientOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::{OptionExt, ResultExt};
use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{
self, InitTimezoneSnafu, LoadLayeredConfigSnafu, MissingConfigSnafu, Result, StartFrontendSnafu,
};
use crate::options::{GlobalOptions, Options};
use crate::App;
use crate::options::GlobalOptions;
use crate::{log_versions, App};
pub struct Instance {
frontend: FeInstance,
// Keep the logging guard to prevent the worker from being dropped.
_guard: Vec<WorkerGuard>,
}
pub const APP_NAME: &str = "greptime-frontend";
impl Instance {
pub fn new(frontend: FeInstance) -> Self {
Self { frontend }
pub fn new(frontend: FeInstance, guard: Vec<WorkerGuard>) -> Self {
Self {
frontend,
_guard: guard,
}
}
pub fn mut_inner(&mut self) -> &mut FeInstance {
@@ -69,7 +79,7 @@ impl Instance {
#[async_trait]
impl App for Instance {
fn name(&self) -> &str {
"greptime-frontend"
APP_NAME
}
async fn start(&mut self) -> Result<()> {
@@ -95,11 +105,11 @@ pub struct Command {
}
impl Command {
pub async fn build(self, opts: FrontendOptions) -> Result<Instance> {
pub async fn build(&self, opts: FrontendOptions) -> Result<Instance> {
self.subcmd.build(opts).await
}
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<FrontendOptions> {
self.subcmd.load_options(global_options)
}
}
@@ -110,13 +120,13 @@ enum SubCommand {
}
impl SubCommand {
async fn build(self, opts: FrontendOptions) -> Result<Instance> {
async fn build(&self, opts: FrontendOptions) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.build(opts).await,
}
}
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
fn load_options(&self, global_options: &GlobalOptions) -> Result<FrontendOptions> {
match self {
SubCommand::Start(cmd) => cmd.load_options(global_options),
}
@@ -156,17 +166,15 @@ pub struct StartCommand {
}
impl StartCommand {
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
Ok(Options::Frontend(Box::new(
self.merge_with_cli_options(
global_options,
FrontendOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
)?,
)))
fn load_options(&self, global_options: &GlobalOptions) -> Result<FrontendOptions> {
self.merge_with_cli_options(
global_options,
FrontendOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
)
}
// The precedence order is: cli > config file > environment variables > default values.
@@ -208,6 +216,7 @@ impl StartCommand {
if let Some(addr) = &self.rpc_addr {
opts.grpc.addr.clone_from(addr);
opts.grpc.tls = tls_opts.clone();
}
if let Some(addr) = &self.mysql_addr {
@@ -239,7 +248,15 @@ impl StartCommand {
Ok(opts)
}
async fn build(self, mut opts: FrontendOptions) -> Result<Instance> {
async fn build(&self, mut opts: FrontendOptions) -> Result<Instance> {
let guard = common_telemetry::init_global_logging(
APP_NAME,
&opts.logging,
&opts.tracing,
opts.node_id.clone(),
);
log_versions(version!(), short_version!());
#[allow(clippy::unnecessary_mut_passed)]
let plugins = plugins::setup_frontend_plugins(&mut opts)
.await
@@ -349,7 +366,7 @@ impl StartCommand {
.build_servers(opts, servers)
.context(StartFrontendSnafu)?;
Ok(Instance::new(instance))
Ok(Instance::new(instance, guard))
}
}
@@ -379,10 +396,7 @@ mod tests {
..Default::default()
};
let Options::Frontend(opts) = command.load_options(&GlobalOptions::default()).unwrap()
else {
unreachable!()
};
let opts = command.load_options(&GlobalOptions::default()).unwrap();
assert_eq!(opts.http.addr, "127.0.0.1:1234");
assert_eq!(ReadableSize::mb(64), opts.http.body_limit);
@@ -430,10 +444,7 @@ mod tests {
..Default::default()
};
let Options::Frontend(fe_opts) = command.load_options(&GlobalOptions::default()).unwrap()
else {
unreachable!()
};
let fe_opts = command.load_options(&GlobalOptions::default()).unwrap();
assert_eq!(Mode::Distributed, fe_opts.mode);
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);
@@ -486,7 +497,7 @@ mod tests {
})
.unwrap();
let logging_opt = options.logging_options();
let logging_opt = options.logging;
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
}
@@ -562,11 +573,7 @@ mod tests {
..Default::default()
};
let Options::Frontend(fe_opts) =
command.load_options(&GlobalOptions::default()).unwrap()
else {
unreachable!()
};
let fe_opts = command.load_options(&GlobalOptions::default()).unwrap();
// Should be read from env, env > default values.
assert_eq!(fe_opts.mysql.runtime_size, 11);

View File

@@ -17,6 +17,8 @@
use async_trait::async_trait;
use common_telemetry::{error, info};
use crate::error::Result;
pub mod cli;
pub mod datanode;
pub mod error;
@@ -35,39 +37,39 @@ pub trait App: Send {
fn name(&self) -> &str;
/// A hook for the implementor to make something happen before the actual startup. Defaults to a no-op.
async fn pre_start(&mut self) -> error::Result<()> {
async fn pre_start(&mut self) -> Result<()> {
Ok(())
}
async fn start(&mut self) -> error::Result<()>;
async fn start(&mut self) -> Result<()>;
/// Waits for the quit signal by default.
fn wait_signal(&self) -> bool {
true
}
async fn stop(&self) -> error::Result<()>;
}
async fn stop(&self) -> Result<()>;
pub async fn start_app(mut app: Box<dyn App>) -> error::Result<()> {
info!("Starting app: {}", app.name());
async fn run(&mut self) -> Result<()> {
info!("Starting app: {}", self.name());
app.pre_start().await?;
self.pre_start().await?;
app.start().await?;
self.start().await?;
if app.wait_signal() {
if let Err(e) = tokio::signal::ctrl_c().await {
error!("Failed to listen for ctrl-c signal: {}", e);
// It's unusual to fail to listen for ctrl-c signal, maybe there's something unexpected in
// the underlying system. So we stop the app instead of running nonetheless to let people
// investigate the issue.
if self.wait_signal() {
if let Err(e) = tokio::signal::ctrl_c().await {
error!(e; "Failed to listen for ctrl-c signal");
// It's unusual to fail to listen for ctrl-c signal, maybe there's something unexpected in
// the underlying system. So we stop the app instead of running nonetheless to let people
// investigate the issue.
}
}
}
app.stop().await?;
info!("Goodbye!");
Ok(())
self.stop().await?;
info!("Goodbye!");
Ok(())
}
}
/// Log the versions of the application, and the arguments passed to the cli.

View File

@@ -19,28 +19,38 @@ use clap::Parser;
use common_config::Configurable;
use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_version::{short_version, version};
use meta_srv::bootstrap::MetasrvInstance;
use meta_srv::metasrv::MetasrvOptions;
use snafu::ResultExt;
use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{self, LoadLayeredConfigSnafu, Result, StartMetaServerSnafu};
use crate::options::{GlobalOptions, Options};
use crate::App;
use crate::options::GlobalOptions;
use crate::{log_versions, App};
pub const APP_NAME: &str = "greptime-metasrv";
pub struct Instance {
instance: MetasrvInstance,
// Keep the logging guard to prevent the worker from being dropped.
_guard: Vec<WorkerGuard>,
}
impl Instance {
fn new(instance: MetasrvInstance) -> Self {
Self { instance }
fn new(instance: MetasrvInstance, guard: Vec<WorkerGuard>) -> Self {
Self {
instance,
_guard: guard,
}
}
}
#[async_trait]
impl App for Instance {
fn name(&self) -> &str {
"greptime-metasrv"
APP_NAME
}
async fn start(&mut self) -> Result<()> {
@@ -66,11 +76,11 @@ pub struct Command {
}
impl Command {
pub async fn build(self, opts: MetasrvOptions) -> Result<Instance> {
pub async fn build(&self, opts: MetasrvOptions) -> Result<Instance> {
self.subcmd.build(opts).await
}
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
self.subcmd.load_options(global_options)
}
}
@@ -81,13 +91,13 @@ enum SubCommand {
}
impl SubCommand {
async fn build(self, opts: MetasrvOptions) -> Result<Instance> {
async fn build(&self, opts: MetasrvOptions) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.build(opts).await,
}
}
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
match self {
SubCommand::Start(cmd) => cmd.load_options(global_options),
}
@@ -128,17 +138,15 @@ struct StartCommand {
}
impl StartCommand {
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
Ok(Options::Metasrv(Box::new(
self.merge_with_cli_options(
global_options,
MetasrvOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
)?,
)))
fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
self.merge_with_cli_options(
global_options,
MetasrvOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
)
}
// The precedence order is: cli > config file > environment variables > default values.
@@ -212,7 +220,11 @@ impl StartCommand {
Ok(opts)
}
async fn build(self, mut opts: MetasrvOptions) -> Result<Instance> {
async fn build(&self, mut opts: MetasrvOptions) -> Result<Instance> {
let guard =
common_telemetry::init_global_logging(APP_NAME, &opts.logging, &opts.tracing, None);
log_versions(version!(), short_version!());
let plugins = plugins::setup_metasrv_plugins(&mut opts)
.await
.context(StartMetaServerSnafu)?;
@@ -229,7 +241,7 @@ impl StartCommand {
.await
.context(error::BuildMetaServerSnafu)?;
Ok(Instance::new(instance))
Ok(Instance::new(instance, guard))
}
}
@@ -254,9 +266,7 @@ mod tests {
..Default::default()
};
let Options::Metasrv(options) = cmd.load_options(&GlobalOptions::default()).unwrap() else {
unreachable!()
};
let options = cmd.load_options(&GlobalOptions::default()).unwrap();
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!(vec!["127.0.0.1:2380".to_string()], options.store_addrs);
assert_eq!(SelectorType::LoadBased, options.selector);
@@ -289,9 +299,7 @@ mod tests {
..Default::default()
};
let Options::Metasrv(options) = cmd.load_options(&GlobalOptions::default()).unwrap() else {
unreachable!()
};
let options = cmd.load_options(&GlobalOptions::default()).unwrap();
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
assert_eq!(vec!["127.0.0.1:2379".to_string()], options.store_addrs);
@@ -343,7 +351,7 @@ mod tests {
})
.unwrap();
let logging_opt = options.logging_options();
let logging_opt = options.logging;
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
}
@@ -398,11 +406,7 @@ mod tests {
..Default::default()
};
let Options::Metasrv(opts) =
command.load_options(&GlobalOptions::default()).unwrap()
else {
unreachable!()
};
let opts = command.load_options(&GlobalOptions::default()).unwrap();
// Should be read from env, env > default values.
assert_eq!(opts.bind_addr, "127.0.0.1:14002");

View File

@@ -13,20 +13,6 @@
// limitations under the License.
use clap::Parser;
use common_telemetry::logging::{LoggingOptions, TracingOptions};
use datanode::config::DatanodeOptions;
use frontend::frontend::FrontendOptions;
use meta_srv::metasrv::MetasrvOptions;
use crate::standalone::StandaloneOptions;
pub enum Options {
Datanode(Box<DatanodeOptions>),
Frontend(Box<FrontendOptions>),
Metasrv(Box<MetasrvOptions>),
Standalone(Box<StandaloneOptions>),
Cli(Box<LoggingOptions>),
}
#[derive(Parser, Default, Debug, Clone)]
pub struct GlobalOptions {
@@ -43,32 +29,3 @@ pub struct GlobalOptions {
#[arg(global = true)]
pub tokio_console_addr: Option<String>,
}
impl GlobalOptions {
pub fn tracing_options(&self) -> TracingOptions {
TracingOptions {
#[cfg(feature = "tokio-console")]
tokio_console_addr: self.tokio_console_addr.clone(),
}
}
}
impl Options {
pub fn logging_options(&self) -> &LoggingOptions {
match self {
Options::Datanode(opts) => &opts.logging,
Options::Frontend(opts) => &opts.logging,
Options::Metasrv(opts) => &opts.logging,
Options::Standalone(opts) => &opts.logging,
Options::Cli(opts) => opts,
}
}
pub fn node_id(&self) -> Option<String> {
match self {
Options::Metasrv(_) | Options::Cli(_) | Options::Standalone(_) => None,
Options::Datanode(opt) => opt.node_id.map(|x| x.to_string()),
Options::Frontend(opt) => opt.node_id.clone(),
}
}
}

View File

@@ -41,6 +41,7 @@ use common_procedure::ProcedureManagerRef;
use common_telemetry::info;
use common_telemetry::logging::{LoggingOptions, TracingOptions};
use common_time::timezone::set_default_timezone;
use common_version::{short_version, version};
use common_wal::config::StandaloneWalConfig;
use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
use datanode::datanode::{Datanode, DatanodeBuilder};
@@ -61,6 +62,7 @@ use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::{OptionExt, ResultExt};
use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{
BuildCacheRegistrySnafu, CacheRequiredSnafu, CreateDirSnafu, IllegalConfigSnafu,
@@ -68,8 +70,10 @@ use crate::error::{
ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
StartProcedureManagerSnafu, StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
};
use crate::options::{GlobalOptions, Options};
use crate::App;
use crate::options::GlobalOptions;
use crate::{log_versions, App};
pub const APP_NAME: &str = "greptime-standalone";
#[derive(Parser)]
pub struct Command {
@@ -78,11 +82,11 @@ pub struct Command {
}
impl Command {
pub async fn build(self, opts: StandaloneOptions) -> Result<Instance> {
pub async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
self.subcmd.build(opts).await
}
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
self.subcmd.load_options(global_options)
}
}
@@ -93,13 +97,13 @@ enum SubCommand {
}
impl SubCommand {
async fn build(self, opts: StandaloneOptions) -> Result<Instance> {
async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.build(opts).await,
}
}
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
match self {
SubCommand::Start(cmd) => cmd.load_options(global_options),
}
@@ -207,12 +211,15 @@ pub struct Instance {
frontend: FeInstance,
procedure_manager: ProcedureManagerRef,
wal_options_allocator: WalOptionsAllocatorRef,
// Keep the logging guard to prevent the worker from being dropped.
_guard: Vec<WorkerGuard>,
}
#[async_trait]
impl App for Instance {
fn name(&self) -> &str {
"greptime-standalone"
APP_NAME
}
async fn start(&mut self) -> Result<()> {
@@ -287,21 +294,19 @@ pub struct StartCommand {
}
impl StartCommand {
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
Ok(Options::Standalone(Box::new(
self.merge_with_cli_options(
global_options,
StandaloneOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
)?,
)))
fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
self.merge_with_cli_options(
global_options,
StandaloneOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
)
}
// The precedence order is: cli > config file > environment variables > default values.
fn merge_with_cli_options(
pub fn merge_with_cli_options(
&self,
global_options: &GlobalOptions,
mut opts: StandaloneOptions,
@@ -373,7 +378,11 @@ impl StartCommand {
#[allow(unreachable_code)]
#[allow(unused_variables)]
#[allow(clippy::diverging_sub_expression)]
async fn build(self, opts: StandaloneOptions) -> Result<Instance> {
async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
let guard =
common_telemetry::init_global_logging(APP_NAME, &opts.logging, &opts.tracing, None);
log_versions(version!(), short_version!());
info!("Standalone start command: {:#?}", self);
info!("Building standalone instance with {opts:#?}");
@@ -516,6 +525,7 @@ impl StartCommand {
frontend,
procedure_manager,
wal_options_allocator,
_guard: guard,
})
}
@@ -665,10 +675,7 @@ mod tests {
..Default::default()
};
let Options::Standalone(options) = cmd.load_options(&GlobalOptions::default()).unwrap()
else {
unreachable!()
};
let options = cmd.load_options(&GlobalOptions::default()).unwrap();
let fe_opts = options.frontend_options();
let dn_opts = options.datanode_options();
let logging_opts = options.logging;
@@ -721,7 +728,7 @@ mod tests {
..Default::default()
};
let Options::Standalone(opts) = cmd
let opts = cmd
.load_options(&GlobalOptions {
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
log_level: Some("debug".to_string()),
@@ -729,10 +736,7 @@ mod tests {
#[cfg(feature = "tokio-console")]
tokio_console_addr: None,
})
.unwrap()
else {
unreachable!()
};
.unwrap();
assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
assert_eq!("debug", opts.logging.level.unwrap());
@@ -794,11 +798,7 @@ mod tests {
..Default::default()
};
let Options::Standalone(opts) =
command.load_options(&GlobalOptions::default()).unwrap()
else {
unreachable!()
};
let opts = command.load_options(&GlobalOptions::default()).unwrap();
// Should be read from env, env > default values.
assert_eq!(opts.logging.dir, "/other/log/dir");

View File

@@ -13,7 +13,9 @@
// limitations under the License.
use std::fmt;
use std::str::FromStr;
use api::v1::region::{compact_request, StrictWindow};
use common_error::ext::BoxedError;
use common_macro::admin_fn;
use common_query::error::Error::ThreadJoin;
@@ -22,7 +24,7 @@ use common_query::error::{
UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, Volatility};
use common_telemetry::error;
use common_telemetry::{error, info};
use datatypes::prelude::*;
use datatypes::vectors::VectorRef;
use session::context::QueryContextRef;
@@ -34,71 +36,78 @@ use crate::ensure_greptime;
use crate::function::{Function, FunctionContext};
use crate::handlers::TableMutationHandlerRef;
macro_rules! define_table_function {
($name: expr, $display_name_str: expr, $display_name: ident, $func: ident, $request: ident) => {
/// A function to $func table, such as `$display_name(table_name)`.
#[admin_fn(name = $name, display_name = $display_name_str, sig_fn = "signature", ret = "uint64")]
pub(crate) async fn $display_name(
table_mutation_handler: &TableMutationHandlerRef,
query_ctx: &QueryContextRef,
params: &[ValueRef<'_>],
) -> Result<Value> {
ensure!(
params.len() == 1,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect 1, have: {}",
params.len()
),
}
);
/// Compact type: strict window.
const COMPACT_TYPE_STRICT_WINDOW: &str = "strict_window";
let ValueRef::String(table_name) = params[0] else {
return UnsupportedInputDataTypeSnafu {
function: $display_name_str,
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
}
.fail();
};
let (catalog_name, schema_name, table_name) =
table_name_to_full_name(table_name, &query_ctx)
.map_err(BoxedError::new)
.context(TableMutationSnafu)?;
let affected_rows = table_mutation_handler
.$func(
$request {
catalog_name,
schema_name,
table_name,
},
query_ctx.clone(),
)
.await?;
Ok(Value::from(affected_rows as u64))
#[admin_fn(
name = "FlushTableFunction",
display_name = "flush_table",
sig_fn = "flush_signature",
ret = "uint64"
)]
pub(crate) async fn flush_table(
table_mutation_handler: &TableMutationHandlerRef,
query_ctx: &QueryContextRef,
params: &[ValueRef<'_>],
) -> Result<Value> {
ensure!(
params.len() == 1,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect 1, have: {}",
params.len()
),
}
);
let ValueRef::String(table_name) = params[0] else {
return UnsupportedInputDataTypeSnafu {
function: "flush_table",
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
}
.fail();
};
let (catalog_name, schema_name, table_name) = table_name_to_full_name(table_name, query_ctx)
.map_err(BoxedError::new)
.context(TableMutationSnafu)?;
let affected_rows = table_mutation_handler
.flush(
FlushTableRequest {
catalog_name,
schema_name,
table_name,
},
query_ctx.clone(),
)
.await?;
Ok(Value::from(affected_rows as u64))
}
define_table_function!(
"FlushTableFunction",
"flush_table",
flush_table,
flush,
FlushTableRequest
);
#[admin_fn(
name = "CompactTableFunction",
display_name = "compact_table",
sig_fn = "compact_signature",
ret = "uint64"
)]
pub(crate) async fn compact_table(
table_mutation_handler: &TableMutationHandlerRef,
query_ctx: &QueryContextRef,
params: &[ValueRef<'_>],
) -> Result<Value> {
let request = parse_compact_params(params, query_ctx)?;
info!("Compact table request: {:?}", request);
define_table_function!(
"CompactTableFunction",
"compact_table",
compact_table,
compact,
CompactTableRequest
);
let affected_rows = table_mutation_handler
.compact(request, query_ctx.clone())
.await?;
fn signature() -> Signature {
Ok(Value::from(affected_rows as u64))
}
fn flush_signature() -> Signature {
Signature::uniform(
1,
vec![ConcreteDataType::string_datatype()],
@@ -106,12 +115,98 @@ fn signature() -> Signature {
)
}
fn compact_signature() -> Signature {
Signature::variadic(
vec![ConcreteDataType::string_datatype()],
Volatility::Immutable,
)
}
/// Parses `compact_table` UDF parameters. This function accepts the following combinations:
/// - `[<table_name>]`: only the table name is provided; the default compaction type (regular) is used.
/// - `[<table_name>, <type>]`: specifies the table name and compaction type; the compaction options take their defaults.
/// - `[<table_name>, <type>, <options>]`: provides both the type and type-specific options.
fn parse_compact_params(
params: &[ValueRef<'_>],
query_ctx: &QueryContextRef,
) -> Result<CompactTableRequest> {
ensure!(
!params.is_empty(),
InvalidFuncArgsSnafu {
err_msg: "Args cannot be empty",
}
);
let (table_name, compact_type) = match params {
[ValueRef::String(table_name)] => (
table_name,
compact_request::Options::Regular(Default::default()),
),
[ValueRef::String(table_name), ValueRef::String(compact_ty_str)] => {
let compact_type = parse_compact_type(compact_ty_str, None)?;
(table_name, compact_type)
}
[ValueRef::String(table_name), ValueRef::String(compact_ty_str), ValueRef::String(options_str)] =>
{
let compact_type = parse_compact_type(compact_ty_str, Some(options_str))?;
(table_name, compact_type)
}
_ => {
return UnsupportedInputDataTypeSnafu {
function: "compact_table",
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
}
.fail()
}
};
let (catalog_name, schema_name, table_name) = table_name_to_full_name(table_name, query_ctx)
.map_err(BoxedError::new)
.context(TableMutationSnafu)?;
Ok(CompactTableRequest {
catalog_name,
schema_name,
table_name,
compact_options: compact_type,
})
}
fn parse_compact_type(type_str: &str, option: Option<&str>) -> Result<compact_request::Options> {
if type_str.eq_ignore_ascii_case(COMPACT_TYPE_STRICT_WINDOW) {
let window_seconds = option
.map(|v| {
i64::from_str(v).map_err(|_| {
InvalidFuncArgsSnafu {
err_msg: format!(
"Compact window is expected to be a valid number, provided: {}",
v
),
}
.build()
})
})
.transpose()?
.unwrap_or(0);
Ok(compact_request::Options::StrictWindow(StrictWindow {
window_seconds,
}))
} else {
Ok(compact_request::Options::Regular(Default::default()))
}
}
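A hedged usage sketch of the parsing above, mirroring the test cases further down (`QueryContext::arc()` and `ValueRef::String` as used there; panics on error for brevity):

// Table name only: regular compaction with default options.
let ctx = QueryContext::arc();
let req = parse_compact_params(&[ValueRef::String("t")], &ctx).unwrap();
assert!(matches!(req.compact_options, compact_request::Options::Regular(_)));

// Table name + type: strict window with window_seconds = 0.
let req = parse_compact_params(
    &[ValueRef::String("t"), ValueRef::String("strict_window")],
    &ctx,
)
.unwrap();

// Table name + type + options: strict window of 3600 seconds.
let req = parse_compact_params(
    &[
        ValueRef::String("t"),
        ValueRef::String("strict_window"),
        ValueRef::String("3600"),
    ],
    &ctx,
)
.unwrap();
assert!(matches!(
    req.compact_options,
    compact_request::Options::StrictWindow(StrictWindow {
        window_seconds: 3600
    })
));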
#[cfg(test)]
mod tests {
use std::sync::Arc;
use api::v1::region::compact_request::Options;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_query::prelude::TypeSignature;
use datatypes::vectors::{StringVector, UInt64Vector};
use session::context::QueryContext;
use super::*;
@@ -174,5 +269,109 @@ mod tests {
define_table_function_test!(flush_table, FlushTableFunction);
define_table_function_test!(compact_table, CompactTableFunction);
fn check_parse_compact_params(cases: &[(&[&str], CompactTableRequest)]) {
for (params, expected) in cases {
let params = params
.iter()
.map(|s| ValueRef::String(s))
.collect::<Vec<_>>();
assert_eq!(
expected,
&parse_compact_params(&params, &QueryContext::arc()).unwrap()
);
}
}
#[test]
fn test_parse_compact_params() {
check_parse_compact_params(&[
(
&["table"],
CompactTableRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "table".to_string(),
compact_options: Options::Regular(Default::default()),
},
),
(
&[&format!("{}.table", DEFAULT_SCHEMA_NAME)],
CompactTableRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "table".to_string(),
compact_options: Options::Regular(Default::default()),
},
),
(
&[&format!(
"{}.{}.table",
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME
)],
CompactTableRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "table".to_string(),
compact_options: Options::Regular(Default::default()),
},
),
(
&["table", "regular"],
CompactTableRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "table".to_string(),
compact_options: Options::Regular(Default::default()),
},
),
(
&["table", "strict_window"],
CompactTableRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "table".to_string(),
compact_options: Options::StrictWindow(StrictWindow { window_seconds: 0 }),
},
),
(
&["table", "strict_window", "3600"],
CompactTableRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "table".to_string(),
compact_options: Options::StrictWindow(StrictWindow {
window_seconds: 3600,
}),
},
),
(
&["table", "regular", "abcd"],
CompactTableRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "table".to_string(),
compact_options: Options::Regular(Default::default()),
},
),
]);
assert!(parse_compact_params(
&["table", "strict_window", "abc"]
.into_iter()
.map(ValueRef::String)
.collect::<Vec<_>>(),
&QueryContext::arc(),
)
.is_err());
assert!(parse_compact_params(
&["a.b.table", "strict_window", "abc"]
.into_iter()
.map(ValueRef::String)
.collect::<Vec<_>>(),
&QueryContext::arc(),
)
.is_err());
}
}

View File

@@ -23,9 +23,9 @@ async fn test_mtls_config() {
// test wrong file
let config = ChannelConfig::new().client_tls_config(ClientTlsOption {
server_ca_cert_path: "tests/tls/wrong_server.cert.pem".to_string(),
client_cert_path: "tests/tls/wrong_client.cert.pem".to_string(),
client_key_path: "tests/tls/wrong_client.key.pem".to_string(),
server_ca_cert_path: "tests/tls/wrong_ca.pem".to_string(),
client_cert_path: "tests/tls/wrong_client.pem".to_string(),
client_key_path: "tests/tls/wrong_client.key".to_string(),
});
let re = ChannelManager::with_tls_config(config);
@@ -33,8 +33,8 @@ async fn test_mtls_config() {
// test corrupted file content
let config = ChannelConfig::new().client_tls_config(ClientTlsOption {
server_ca_cert_path: "tests/tls/server.cert.pem".to_string(),
client_cert_path: "tests/tls/client.cert.pem".to_string(),
server_ca_cert_path: "tests/tls/ca.pem".to_string(),
client_cert_path: "tests/tls/client.pem".to_string(),
client_key_path: "tests/tls/corrupted".to_string(),
});
@@ -44,9 +44,9 @@ async fn test_mtls_config() {
// success
let config = ChannelConfig::new().client_tls_config(ClientTlsOption {
server_ca_cert_path: "tests/tls/server.cert.pem".to_string(),
client_cert_path: "tests/tls/client.cert.pem".to_string(),
client_key_path: "tests/tls/client.key.pem".to_string(),
server_ca_cert_path: "tests/tls/ca.pem".to_string(),
client_cert_path: "tests/tls/client.pem".to_string(),
client_key_path: "tests/tls/client.key".to_string(),
});
let re = ChannelManager::with_tls_config(config).unwrap();

View File

@@ -0,0 +1,28 @@
-----BEGIN CERTIFICATE-----
MIIE3DCCA0SgAwIBAgIRAObeYbJFiVQSGR8yk44dsOYwDQYJKoZIhvcNAQELBQAw
gYUxHjAcBgNVBAoTFW1rY2VydCBkZXZlbG9wbWVudCBDQTEtMCsGA1UECwwkbHVj
aW9ATHVjaW9zLVdvcmstTUJQIChMdWNpbyBGcmFuY28pMTQwMgYDVQQDDCtta2Nl
cnQgbHVjaW9ATHVjaW9zLVdvcmstTUJQIChMdWNpbyBGcmFuY28pMB4XDTE5MDky
OTIzMzUzM1oXDTI5MDkyOTIzMzUzM1owgYUxHjAcBgNVBAoTFW1rY2VydCBkZXZl
bG9wbWVudCBDQTEtMCsGA1UECwwkbHVjaW9ATHVjaW9zLVdvcmstTUJQIChMdWNp
byBGcmFuY28pMTQwMgYDVQQDDCtta2NlcnQgbHVjaW9ATHVjaW9zLVdvcmstTUJQ
IChMdWNpbyBGcmFuY28pMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEA
y/vE61ItbN/1qMYt13LMf+le1svwfkCCOPsygk7nWeRXmomgUpymqn1LnWiuB0+e
4IdVH2f5E9DknWEpPhKIDMRTCbz4jTwQfHrxCb8EGj3I8oO73pJO5S/xCedM9OrZ
qWcYWwN0GQ8cO/ogazaoZf1uTrRNHyzRyQsKyb412kDBTNEeldJZ2ljKgXXvh4HO
2ZIk9K/ZAaAf6VN8K/89rlJ9/KPgRVNsyAapE+Pb8XXKtpzeFiEcUfuXVYWtkoW+
xyn/Zu8A1L2CXMQ1sARh7P/42BTMKr5pfraYgcBGxKXLrxoySpxCO9KqeVveKy1q
fPm5FCwFsXDr0koFLrCiR58mcIO/04Q9DKKTV4Z2a+LoqDJRY37KfBSc8sDMPhw5
k7g3WPoa6QwXRjZTCA5fHWVgLOtcwLsnju5tBE4LDxwF6s+1wPF8NI5yUfufcEjJ
Z6JBwgoWYosVj27Lx7KBNLU/57PX9ryee691zmtswt0tP0WVBAgalhYWg99RXoa3
AgMBAAGjRTBDMA4GA1UdDwEB/wQEAwICBDASBgNVHRMBAf8ECDAGAQH/AgEAMB0G
A1UdDgQWBBQdvlE4Bdcsjc9oaxjDCRu5FiuZkzANBgkqhkiG9w0BAQsFAAOCAYEA
BP/6o1kPINksMJZSSXgNCPZskDLyGw7auUZBnQ0ocDT3W6gXQvT/27LM1Hxoj9Eh
qU1TYdEt7ppecLQSGvzQ02MExG7H75art75oLiB+A5agDira937YbK4MCjqW481d
bDhw6ixJnY1jIvwjEZxyH6g94YyL927aSPch51fys0kSnjkFzC2RmuzDADScc4XH
5P1+/3dnIm3M5yfpeUzoaOrTXNmhn8p0RDIGrZ5kA5eISIGGD3Mm8FDssUNKndtO
g4ojHUsxb14icnAYGeye1NOhGiqN6TEFcgr6MPd0XdFNZ5c0HUaBCfN6bc+JxDV5
MKZVJdNeJsYYwilgJNHAyZgCi30JC20xeYVtTF7CEEsMrFDGJ70Kz7o/FnRiFsA1
ZSwVVWhhkHG2VkT4vlo0O3fYeZpenYicvy+wZNTbGK83gzHWqxxNC1z3Etg5+HRJ
F9qeMWPyfA3IHYXygiMcviyLcyNGG/SJ0EhUpYBN/Gg7wI5yFkcsxUDPPzd23O0M
-----END CERTIFICATE-----

View File

@@ -1,40 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIG+jCCBOKgAwIBAgIBAjANBgkqhkiG9w0BAQsFADCBhzELMAkGA1UEBhMCSU4x
EjAQBgNVBAgMCUthcm5hdGFrYTESMBAGA1UEBwwJQkFOR0FMT1JFMRUwEwYDVQQK
DAxHb0xpbnV4Q2xvdWQxEjAQBgNVBAMMCWNhLXNlcnZlcjElMCMGCSqGSIb3DQEJ
ARYWYWRtaW5AZ29saW51eGNsb3VkLmNvbTAeFw0yMzAyMTQxMTM5NDBaFw0yNzA4
MjIxMTM5NDBaMHAxCzAJBgNVBAYTAklOMRIwEAYDVQQIDAlLYXJuYXRha2ExFTAT
BgNVBAoMDEdvTGludXhDbG91ZDEPMA0GA1UEAwwGc2VydmVyMSUwIwYJKoZIhvcN
AQkBFhZhZG1pbkBnb2xpbnV4Y2xvdWQuY29tMIICIjANBgkqhkiG9w0BAQEFAAOC
Ag8AMIICCgKCAgEAvVtxAoRjLRs3Ei4+CgzqJ2+bpc0sBdUm/4LM/D+0KbXxwD7w
HP6GcKl/9zf9GJg56pVXxXMaerMDLS4Est25+mBgqcePC6utCBYrKA25pKbkFkxZ
TPh9/R4RHGVJ3KHy9vc4VzqoV7XFMJFFUQ2fQywHZlXh6MNz0WPTIGaH7hvYoHbK
I3NpPq8TjRuuV61XB0hK+RW0K6/5Yuj74h/mfheX1VIUOjGwKnTPccZQAlrKYjeW
BZBS4YqahkTIaGLa06SdUSkuhL85rqAxWvhK9GIRlQLNYJOzg+E3jGyqf566xX60
fxM6alLYf+ZzCwSBuDDj5f+j752gPLYUI82YL4xQ+AEHNR8U1uMvt0EzzFt7mSRe
fobVr+Y2zpci+mo7kcQGOhenzGclsm+qXwMhYUnJcOYFZWtTJlFaaPreL4M3Dh+2
pmKj23ZU6zcT3MYtE6phjCLJl0DsFIcOn+tSqMdpwB20EeQjo9bVJuw/HJrlpcnY
U9aLsnm/4Ls5A0BQutZnxKBIJjpzp8VfK0WU8a4iKok3AS0z1/K+atNrgSUB9DCH
0MvLqqQmM9TdLcZj7NSEfLyyFVwPRc5dt4CrNDL7JUpMzt36ezU83JU+nfqWDZsL
+2JOaE4gGLZDcA3cfP83/mYRaAnYW/9W4vEnIpa6subzq1aFOeY/3dKLTx8CAwEA
AaOCAYUwggGBMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQDAgZAMDMGCWCGSAGG
+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYD
VR0OBBYEFLijeA+RFDQtuVeMUkaXqF7LF50GMIG8BgNVHSMEgbQwgbGAFKVZwpSJ
CPkNwGXyJX1sl2Pbby4FoYGNpIGKMIGHMQswCQYDVQQGEwJJTjESMBAGA1UECAwJ
S2FybmF0YWthMRIwEAYDVQQHDAlCQU5HQUxPUkUxFTATBgNVBAoMDEdvTGludXhD
bG91ZDESMBAGA1UEAwwJY2Etc2VydmVyMSUwIwYJKoZIhvcNAQkBFhZhZG1pbkBn
b2xpbnV4Y2xvdWQuY29tggkA7NvbvF8jodEwDgYDVR0PAQH/BAQDAgWgMBMGA1Ud
JQQMMAoGCCsGAQUFBwMBMCkGA1UdEQQiMCCHBMCoAHKHBAoAAg+CEnNlcnZlci5l
eGFtcGxlLmNvbTANBgkqhkiG9w0BAQsFAAOCAgEAXvaS9+y5g2Kw/4EPsnhjpN1v
CxXW0+UYSWOaxVJdEAjGQI/1m9LOiF9IHImmiwluJ/Bex1TzuaTCKmpluPwGvd9D
Zgf0A5SmVqW4WTT4d2nSecxw4OICJ3j6ubKkvMVf9s+ZJwb+fMMUaSt80bWqp1TY
XbZguv67PkBECPqVe6rgzXnTLwM3lE8EgG8VtM3IOy9a5SIEjm5L8SQ2I2hiytmE
e4jR1fbZsB5NbBdfA3GFMKQEE2dIymkG3Bz71M3tZi1y4RnHtRKdrFtrIlgclrwd
nVnQn/NiXUOOzsL2+vwSF32SSbiLvOxu63qO1YDBkKVChog3P/2f6xcJ23wkbHlL
qaL2jvLo6ylvMPUYHf5ZWat5zayaGUMHYDKcbD4Dw7aY3M0tNgEHdqUqNePmKvmn
luyXof3KmmLgWlcfBoX96a7hXDtxFyB2N4nzfQBXh+0VAlgqa+ZZhpdEqRQaWkkR
MDBdsVJ9O3812IaNfMzpS1vb701GFDCM5Hcyw6a/v6Ln08NMhYut4saLi13kHilS
Wq7wOAfW3rzxuhjOJJxsi0jJNI775q+a/BbbG/CPl826bXPGH43BdPV8mKwsX5HM
wwDKf3otP/v7bxwJabfhv2EKUy+W1kkFW9FEZ919yTtfhSDrTNcrXtE7RkiAepfm
95I025URIlhJGLGBUlA=
-----END CERTIFICATE-----

View File

@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDyptbMyYWztgta
t1MXLMzIkaQdeeVbs1Y/qCpAdwZe/Y5ZpbzjGIjCxbB6vNRSnEbYKpytKHPzYfM7
8d8K8bPvpnqXIiTXFT0JQlw1OHLC1fr4e598GJumAmpMYFrtqv0fbmUFTuQGbHxe
OH2vji0bvr3NKZubMfkEZP3X4sNXXoXIuW2LaS8OMGKoJaeCBvdbszEiSGj/v9Bj
pM0yLTH89NNMX1T+FtTKnuXag5g7pr6lzJj83+MzAGy4nOjseSuUimuiyG90/C5t
A5wC0Qh5RbDnkFYhC44Kxof/i6+jnfateIPNiIIwQV+2f6G/aK1hgjekT10m/eoR
YDTf+e5ZAgMBAAECggEACODt7yRYjhDVLYaTtb9f5t7dYG67Y7WWLFIc6arxQryI
XuNfm/ej2WyeXn9WTYeGWBaHERbv1zH4UnMxNBdP/C7dQXZwXqZaS2JwOUpNeK+X
tUvgtAu6dkKUXSMRcKzXAjVp4N3YHhwOGOx8PNY49FDwZPdmyDD16aFAYIvdle6/
PSMrj38rB1sbQQdmRob2FjJBSDZ44nsr+/nilrcOFNfNnWv7tQIWYVXNcLfdK/WJ
ZCDFhA8lr/Yon6MEq6ApTj2ZYRRGXPd6UeASJkmTZEUIUbeDcje/MO8cHkREpuRH
wm3pCjR7OdO4vc+/d/QmEvu5ns6wbTauelYnL616YQKBgQD414gJtpCHauNEUlFB
v/R3DzPI5NGp9PAqovOD8nCbI49Mw61gP/ExTIPKiR5uUX/5EL04uspaNkuohXk+
ys0G5At0NfV7W39lzhvALEaSfleybvYxppbBrc20/q8Gvi/i30NY+1LM3RdtMiEw
hKHjU0SnFhJq0InFg3AO/iCeTQKBgQD5obkbzpOidSsa55aNsUlO2qjiUY9leq9b
irAohIZ8YnuuixYvkOeSeSz1eIrA4tECeAFSgTZxYe1Iz+USru2Xg/0xNte11dJD
rBoH/yMn2gDvBK7xQ6uFMPTeYtKG0vfvpXZYSWZzGntyrHTwFk6UV+xdrt9MBdd1
XdSn7bwOPQKBgC9VQAko8uDvUf+C8PXiv2uONrl13PPJJY3WpR9qFEVOREnDxszS
HNzVwxPZdTJiykbkCjoqPadfQJDzopZxGQLAifU29lTamKcSx3CMe3gOFDxaovXa
zD5XAxP0hfJwZsdu1G6uj5dsTrJ0oJ+L+wc0pZBqwGIU/L/XOo9/g1DZAoGAUebL
kuH98ik7EUK2VJq8EJERI9/ailLsQb6I+WIxtZGiPqwHhWencpkrNQZtj8dbB9JT
rLwUHrMgZOlAoRafgTyez4zMzS3wJJ/Mkp8U67hM4h7JPwMSvUpIrMYDiJSjIA9L
er/qSw1/Pypx22uWMHmAZWRAgvLPtAQrB0Wqk4kCgYEAr2H1PvfbwZwkSvlMt5o8
WLnBbxcM3AKglLRbkShxxgiZYdEP71/uOtRMiL26du5XX8evItITN0DsvmXL/kcd
h29LK7LM5uLw7efz0Qxs03G6kEyIHVkacowHi5I5Ul1qI61SoV3yMB1TjIU+bXZt
0ZjC07totO0fqPOLQxonjQg=
-----END PRIVATE KEY-----

View File

@@ -0,0 +1,27 @@
-----BEGIN CERTIFICATE-----
MIIEmDCCAwCgAwIBAgIQVEJFCgU/CZk9JEwTucWPpzANBgkqhkiG9w0BAQsFADCB
hTEeMBwGA1UEChMVbWtjZXJ0IGRldmVsb3BtZW50IENBMS0wKwYDVQQLDCRsdWNp
b0BMdWNpb3MtV29yay1NQlAgKEx1Y2lvIEZyYW5jbykxNDAyBgNVBAMMK21rY2Vy
dCBsdWNpb0BMdWNpb3MtV29yay1NQlAgKEx1Y2lvIEZyYW5jbykwHhcNMTkwNjAx
MDAwMDAwWhcNMjkwOTI5MjMzNTM0WjBYMScwJQYDVQQKEx5ta2NlcnQgZGV2ZWxv
cG1lbnQgY2VydGlmaWNhdGUxLTArBgNVBAsMJGx1Y2lvQEx1Y2lvcy1Xb3JrLU1C
UCAoTHVjaW8gRnJhbmNvKTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
APKm1szJhbO2C1q3UxcszMiRpB155VuzVj+oKkB3Bl79jlmlvOMYiMLFsHq81FKc
RtgqnK0oc/Nh8zvx3wrxs++mepciJNcVPQlCXDU4csLV+vh7n3wYm6YCakxgWu2q
/R9uZQVO5AZsfF44fa+OLRu+vc0pm5sx+QRk/dfiw1dehci5bYtpLw4wYqglp4IG
91uzMSJIaP+/0GOkzTItMfz000xfVP4W1Mqe5dqDmDumvqXMmPzf4zMAbLic6Ox5
K5SKa6LIb3T8Lm0DnALRCHlFsOeQViELjgrGh/+Lr6Od9q14g82IgjBBX7Z/ob9o
rWGCN6RPXSb96hFgNN/57lkCAwEAAaOBrzCBrDAOBgNVHQ8BAf8EBAMCBaAwEwYD
VR0lBAwwCgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADAfBgNVHSMEGDAWgBQdvlE4
Bdcsjc9oaxjDCRu5FiuZkzBWBgNVHREETzBNggtleGFtcGxlLmNvbYINKi5leGFt
cGxlLmNvbYIMZXhhbXBsZS50ZXN0gglsb2NhbGhvc3SHBH8AAAGHEAAAAAAAAAAA
AAAAAAAAAAEwDQYJKoZIhvcNAQELBQADggGBAKb2TJ8l+e1eraNwZWizLw5fccAf
y59J1JAWdLxZyAI/bkiTlVO3DQoPZpw7XwLhefCvILkwKAL4TtIGGVC9yTb5Q5eg
rqGO3FC0yg1fn65Kf1VpVxxUVyoiM5PQ4pFJb4AicAv88rCOLD9FFuE0PKOKU/dm
Tw0WgPStoh9wsJ1RXUuTJYZs1nd1kMBlfv9NbLilnL+cR2sLktS54X5XagsBYVlf
oapRb0JtABOoQhX3U8QMq8UF8yzceRHNTN9yfLOUrW26s9nKtlWVniNhw1uPxZw9
RHM7w9/4+a9LXtEDYg4IP/1mm0ywBoUqy1O6hA73uId+Yi/kFBks/GyYaGjKgYcO
23B75tkPGYEdGuGZYLzZNHbXg4V0UxFQG3KA1pUiSnD3bN2Rxs+CMpzORnOeK3xi
EooKgAPYsehItoQOMPpccI2xHdSAMWtwUgOKrefUQujkx2Op+KFlspF0+WJ6AZEe
2D4hyWaEZsvvILXapwqHDCuN3/jSUlTIqUoE1w==
-----END CERTIFICATE-----

View File

@@ -180,7 +180,7 @@ mod tests {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
flow_name: "my_flow".to_string(),
raw_sql: "sql".to_string(),
expire_when: "expire".to_string(),
expire_after: Some(300),
comment: "comment".to_string(),
options: Default::default(),
},

View File

@@ -18,6 +18,7 @@ use std::collections::BTreeMap;
use api::v1::flow::flow_request::Body as PbFlowRequest;
use api::v1::flow::{CreateRequest, FlowRequest, FlowRequestHeader};
use api::v1::ExpireAfter;
use async_trait::async_trait;
use common_catalog::format_full_flow_name;
use common_procedure::error::{FromJsonSnafu, ToJsonSnafu};
@@ -283,7 +284,7 @@ impl From<&CreateFlowData> for CreateRequest {
sink_table_name: Some(value.task.sink_table_name.clone().into()),
// Always true
create_if_not_exists: true,
expire_when: value.task.expire_when.clone(),
expire_after: value.task.expire_after.map(|value| ExpireAfter { value }),
comment: value.task.comment.clone(),
sql: value.task.sql.clone(),
flow_options: value.task.flow_options.clone(),
@@ -297,7 +298,7 @@ impl From<&CreateFlowData> for FlowInfoValue {
catalog_name,
flow_name,
sink_table_name,
expire_when,
expire_after,
comment,
sql,
flow_options: options,
@@ -318,7 +319,7 @@ impl From<&CreateFlowData> for FlowInfoValue {
catalog_name,
flow_name,
raw_sql: sql,
expire_when,
expire_after,
comment,
options,
}

View File

@@ -30,10 +30,10 @@ use crate::lock_key::{CatalogLock, SchemaLock, TableNameLock};
use crate::rpc::ddl::CreateViewTask;
use crate::{metrics, ClusterId};
// The proceudure to execute `[CreateViewTask]`.
// The procedure to execute `[CreateViewTask]`.
pub struct CreateViewProcedure {
pub context: DdlContext,
pub creator: ViewCreator,
pub data: CreateViewData,
}
impl CreateViewProcedure {
@@ -42,24 +42,27 @@ impl CreateViewProcedure {
pub fn new(cluster_id: ClusterId, task: CreateViewTask, context: DdlContext) -> Self {
Self {
context,
creator: ViewCreator::new(cluster_id, task),
data: CreateViewData {
state: CreateViewState::Prepare,
cluster_id,
task,
need_update: false,
},
}
}
pub fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
let data = serde_json::from_str(json).context(FromJsonSnafu)?;
let creator = ViewCreator { data };
Ok(CreateViewProcedure { context, creator })
Ok(CreateViewProcedure { context, data })
}
fn view_info(&self) -> &RawTableInfo {
&self.creator.data.task.view_info
&self.data.task.view_info
}
fn need_update(&self) -> bool {
self.creator.data.need_update
self.data.need_update
}
pub(crate) fn view_id(&self) -> TableId {
@@ -68,7 +71,7 @@ impl CreateViewProcedure {
#[cfg(any(test, feature = "testing"))]
pub fn set_allocated_metadata(&mut self, view_id: TableId) {
self.creator.set_allocated_metadata(view_id, false)
self.data.set_allocated_metadata(view_id, false)
}
/// On the prepare step, it performs:
@@ -79,7 +82,7 @@ impl CreateViewProcedure {
/// - ViewName exists and `create_if_not_exists` is false.
/// - Failed to allocate [ViewMetadata].
pub(crate) async fn on_prepare(&mut self) -> Result<Status> {
let expr = &self.creator.data.task.create_view;
let expr = &self.data.task.create_view;
let view_name_value = self
.context
.table_metadata_manager
@@ -102,7 +105,7 @@ impl CreateViewProcedure {
ensure!(
expr.create_if_not_exists || expr.or_replace,
error::ViewAlreadyExistsSnafu {
view_name: self.creator.data.table_ref().to_string(),
view_name: self.data.table_ref().to_string(),
}
);
@@ -122,18 +125,18 @@ impl CreateViewProcedure {
.get(view_id)
.await?
.with_context(|| error::TableInfoNotFoundSnafu {
table: self.creator.data.table_ref().to_string(),
table: self.data.table_ref().to_string(),
})?;
// Ensure the existing one is a view; we can't replace a table.
ensure!(
view_info_value.table_info.table_type == TableType::View,
error::TableAlreadyExistsSnafu {
table_name: self.creator.data.table_ref().to_string(),
table_name: self.data.table_ref().to_string(),
}
);
self.creator.set_allocated_metadata(view_id, true);
self.data.set_allocated_metadata(view_id, true);
} else {
// Allocate the new `view_id`.
let TableMetadata { table_id, .. } = self
@@ -141,15 +144,15 @@ impl CreateViewProcedure {
.table_metadata_allocator
.create_view(
&TableMetadataAllocatorContext {
cluster_id: self.creator.data.cluster_id,
cluster_id: self.data.cluster_id,
},
&None,
)
.await?;
self.creator.set_allocated_metadata(table_id, false);
self.data.set_allocated_metadata(table_id, false);
}
self.creator.data.state = CreateViewState::CreateMetadata;
self.data.state = CreateViewState::CreateMetadata;
Ok(Status::executing(true))
}
@@ -169,9 +172,9 @@ impl CreateViewProcedure {
.get(view_id)
.await?
.with_context(|| error::ViewNotFoundSnafu {
view_name: self.creator.data.table_ref().to_string(),
view_name: self.data.table_ref().to_string(),
})?;
let new_logical_plan = self.creator.data.task.raw_logical_plan().clone();
let new_logical_plan = self.data.task.raw_logical_plan().clone();
manager
.update_view_info(view_id, &current_view_info, new_logical_plan)
.await?;
@@ -180,7 +183,7 @@ impl CreateViewProcedure {
} else {
let raw_view_info = self.view_info().clone();
manager
.create_view_metadata(raw_view_info, self.creator.data.task.raw_logical_plan())
.create_view_metadata(raw_view_info, self.data.task.raw_logical_plan())
.await?;
info!(
@@ -200,7 +203,7 @@ impl Procedure for CreateViewProcedure {
}
async fn execute(&mut self, ctx: &ProcedureContext) -> ProcedureResult<Status> {
let state = &self.creator.data.state;
let state = &self.data.state;
let _timer = metrics::METRIC_META_PROCEDURE_CREATE_VIEW
.with_label_values(&[state.as_ref()])
@@ -214,11 +217,11 @@ impl Procedure for CreateViewProcedure {
}
fn dump(&self) -> ProcedureResult<String> {
serde_json::to_string(&self.creator.data).context(ToJsonSnafu)
serde_json::to_string(&self.data).context(ToJsonSnafu)
}
fn lock_key(&self) -> LockKey {
let table_ref = &self.creator.data.table_ref();
let table_ref = &self.data.table_ref();
LockKey::new(vec![
CatalogLock::Read(table_ref.catalog).into(),
@@ -228,30 +231,6 @@ impl Procedure for CreateViewProcedure {
}
}
/// The VIEW creator
pub struct ViewCreator {
/// The serializable data.
pub data: CreateViewData,
}
impl ViewCreator {
pub fn new(cluster_id: u64, task: CreateViewTask) -> Self {
Self {
data: CreateViewData {
state: CreateViewState::Prepare,
cluster_id,
task,
need_update: false,
},
}
}
fn set_allocated_metadata(&mut self, view_id: TableId, need_update: bool) {
self.data.task.view_info.ident.table_id = view_id;
self.data.need_update = need_update;
}
}
#[derive(Debug, Clone, Serialize, Deserialize, AsRefStr, PartialEq)]
pub enum CreateViewState {
/// Prepares to create the table
@@ -270,6 +249,11 @@ pub struct CreateViewData {
}
impl CreateViewData {
fn set_allocated_metadata(&mut self, view_id: TableId, need_update: bool) {
self.task.view_info.ident.table_id = view_id;
self.need_update = need_update;
}
fn table_ref(&self) -> TableReference<'_> {
self.task.table_ref()
}

View File

@@ -184,10 +184,10 @@ impl TableMetadataAllocator {
pub type PeerAllocatorRef = Arc<dyn PeerAllocator>;
/// [PeerAllocator] allocates [Peer]s for creating regions.
/// [`PeerAllocator`] allocates [`Peer`]s for creating regions.
#[async_trait]
pub trait PeerAllocator: Send + Sync {
/// Allocates `regions` size [Peer]s.
/// Allocates `regions` size [`Peer`]s.
async fn alloc(&self, ctx: &TableMetadataAllocatorContext, regions: usize)
-> Result<Vec<Peer>>;
}
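A hedged sketch of a trivial implementer of the trait as documented above; the `FixedPeerAllocator` name is hypothetical, and `Peer: Clone` is assumed:

// Hypothetical allocator that hands out the same peer for every region slot.
struct FixedPeerAllocator {
    peer: Peer,
}

#[async_trait]
impl PeerAllocator for FixedPeerAllocator {
    async fn alloc(
        &self,
        _ctx: &TableMetadataAllocatorContext,
        regions: usize,
    ) -> Result<Vec<Peer>> {
        // One peer per requested region; assumes `Peer: Clone`.
        Ok(vec![self.peer.clone(); regions])
    }
}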

View File

@@ -44,7 +44,7 @@ pub(crate) fn test_create_flow_task(
sink_table_name,
or_replace: false,
create_if_not_exists,
expire_when: "".to_string(),
expire_after: Some(300),
comment: "".to_string(),
sql: "raw_sql".to_string(),
flow_options: Default::default(),

View File

@@ -328,7 +328,7 @@ mod tests {
sink_table_name,
flownode_ids,
raw_sql: "raw".to_string(),
expire_when: "expr".to_string(),
expire_after: Some(300),
comment: "hi".to_string(),
options: Default::default(),
}
@@ -420,7 +420,7 @@ mod tests {
sink_table_name: another_sink_table_name,
flownode_ids: [(0, 1u64)].into(),
raw_sql: "raw".to_string(),
expire_when: "expr".to_string(),
expire_after: Some(300),
comment: "hi".to_string(),
options: Default::default(),
};

View File

@@ -123,7 +123,8 @@ pub struct FlowInfoValue {
/// The raw sql.
pub(crate) raw_sql: String,
/// The expr of expire.
pub(crate) expire_when: String,
/// Duration in seconds as `i64`.
pub(crate) expire_after: Option<i64>,
/// The comment.
pub(crate) comment: String,
/// The options.

View File

@@ -28,8 +28,8 @@ use api::v1::meta::{
};
use api::v1::{
AlterExpr, CreateDatabaseExpr, CreateFlowExpr, CreateTableExpr, CreateViewExpr,
DropDatabaseExpr, DropFlowExpr, DropTableExpr, DropViewExpr, QueryContext as PbQueryContext,
TruncateTableExpr,
DropDatabaseExpr, DropFlowExpr, DropTableExpr, DropViewExpr, ExpireAfter,
QueryContext as PbQueryContext, TruncateTableExpr,
};
use base64::engine::general_purpose;
use base64::Engine as _;
@@ -898,7 +898,8 @@ pub struct CreateFlowTask {
pub sink_table_name: TableName,
pub or_replace: bool,
pub create_if_not_exists: bool,
pub expire_when: String,
/// Duration in seconds. Data older than this duration will not be used.
pub expire_after: Option<i64>,
pub comment: String,
pub sql: String,
pub flow_options: HashMap<String, String>,
@@ -915,7 +916,7 @@ impl TryFrom<PbCreateFlowTask> for CreateFlowTask {
sink_table_name,
or_replace,
create_if_not_exists,
expire_when,
expire_after,
comment,
sql,
flow_options,
@@ -934,7 +935,7 @@ impl TryFrom<PbCreateFlowTask> for CreateFlowTask {
.into(),
or_replace,
create_if_not_exists,
expire_when,
expire_after: expire_after.map(|e| e.value),
comment,
sql,
flow_options,
@@ -951,7 +952,7 @@ impl From<CreateFlowTask> for PbCreateFlowTask {
sink_table_name,
or_replace,
create_if_not_exists,
expire_when,
expire_after,
comment,
sql,
flow_options,
@@ -965,7 +966,7 @@ impl From<CreateFlowTask> for PbCreateFlowTask {
sink_table_name: Some(sink_table_name.into()),
or_replace,
create_if_not_exists,
expire_when,
expire_after: expire_after.map(|value| ExpireAfter { value }),
comment,
sql,
flow_options,
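A minimal sketch of the `expire_after` round-trip these conversions introduce, assuming `ExpireAfter` carries a single `value: i64` field as the code above suggests:

// Rust -> protobuf: wrap the seconds value.
let expire_after: Option<i64> = Some(300);
let pb = expire_after.map(|value| ExpireAfter { value });
// protobuf -> Rust: unwrap back to plain seconds.
let secs: Option<i64> = pb.map(|e| e.value);
assert_eq!(secs, Some(300));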

View File

@@ -36,7 +36,7 @@ use crate::error::{
};
use crate::local::runner::Runner;
use crate::procedure::{BoxedProcedureLoader, InitProcedureState};
use crate::store::{ProcedureMessage, ProcedureStore, StateStoreRef};
use crate::store::{ProcedureMessage, ProcedureMessages, ProcedureStore, StateStoreRef};
use crate::{
BoxedProcedure, ContextProvider, LockKey, ProcedureId, ProcedureManager, ProcedureState,
ProcedureWithId, Watcher,
@@ -534,8 +534,11 @@ impl LocalManager {
info!("LocalManager start to recover");
let recover_start = Instant::now();
let (messages, rollback_messages, finished_ids) =
self.procedure_store.load_messages().await?;
let ProcedureMessages {
messages,
rollback_messages,
finished_ids,
} = self.procedure_store.load_messages().await?;
// Submits recovered messages first.
self.submit_recovered_messages(rollback_messages, InitProcedureState::RollingBack);
self.submit_recovered_messages(messages, InitProcedureState::Running);

View File

@@ -55,6 +55,17 @@ pub struct ProcedureMessage {
pub error: Option<String>,
}
/// A collection of all procedures' messages.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct ProcedureMessages {
/// A map of uncommitted procedures
pub messages: HashMap<ProcedureId, ProcedureMessage>,
/// A map of rolling back procedures
pub rollback_messages: HashMap<ProcedureId, ProcedureMessage>,
/// A list of finished procedures' ids
pub finished_ids: Vec<ProcedureId>,
}
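A short sketch of what the named struct buys over the old three-tuple: call sites destructure by field name instead of position, so a reordering can no longer silently swap the two maps. Mirrors the call sites shown below:

let ProcedureMessages {
    messages,
    rollback_messages,
    finished_ids,
} = store.load_messages().await?;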
/// Procedure storage layer.
pub(crate) struct ProcedureStore {
proc_path: String,
@@ -182,17 +193,7 @@ impl ProcedureStore {
}
/// Load procedures from the storage.
/// Returns:
/// - a map of uncommitted procedures
/// - a map of rolling back procedures
/// - a list of finished procedures' ids
pub(crate) async fn load_messages(
&self,
) -> Result<(
HashMap<ProcedureId, ProcedureMessage>,
HashMap<ProcedureId, ProcedureMessage>,
Vec<ProcedureId>,
)> {
pub(crate) async fn load_messages(&self) -> Result<ProcedureMessages> {
// Track the key-value pair by procedure id.
let mut procedure_key_values: HashMap<_, (ParsedKey, Vec<u8>)> = HashMap::new();
@@ -242,7 +243,11 @@ impl ProcedureStore {
}
}
Ok((messages, rollback_messages, finished_ids))
Ok(ProcedureMessages {
messages,
rollback_messages,
finished_ids,
})
}
fn load_one_message(&self, key: &ParsedKey, value: &[u8]) -> Option<ProcedureMessage> {
@@ -515,10 +520,14 @@ mod tests {
.await
.unwrap();
let (messages, rollback_messages, finished) = store.load_messages().await.unwrap();
let ProcedureMessages {
messages,
rollback_messages,
finished_ids,
} = store.load_messages().await.unwrap();
assert_eq!(1, messages.len());
assert!(rollback_messages.is_empty());
assert!(finished.is_empty());
assert!(finished_ids.is_empty());
let msg = messages.get(&procedure_id).unwrap();
let expect = ProcedureMessage {
type_name: "MockProcedure".to_string(),
@@ -545,10 +554,14 @@ mod tests {
.unwrap();
store.commit_procedure(procedure_id, 1).await.unwrap();
let (messages, rollback_messages, finished) = store.load_messages().await.unwrap();
let ProcedureMessages {
messages,
rollback_messages,
finished_ids,
} = store.load_messages().await.unwrap();
assert!(messages.is_empty());
assert!(rollback_messages.is_empty());
assert_eq!(&[procedure_id], &finished[..]);
assert_eq!(&[procedure_id], &finished_ids[..]);
}
#[tokio::test]
@@ -582,10 +595,14 @@ mod tests {
.await
.unwrap();
let (messages, rollback_messages, finished) = store.load_messages().await.unwrap();
let ProcedureMessages {
messages,
rollback_messages,
finished_ids,
} = store.load_messages().await.unwrap();
assert!(messages.is_empty());
assert_eq!(1, rollback_messages.len());
assert!(finished.is_empty());
assert!(finished_ids.is_empty());
assert!(rollback_messages.contains_key(&procedure_id));
}
@@ -611,10 +628,14 @@ mod tests {
store.delete_procedure(procedure_id).await.unwrap();
let (messages, rollback_messages, finished) = store.load_messages().await.unwrap();
let ProcedureMessages {
messages,
rollback_messages,
finished_ids,
} = store.load_messages().await.unwrap();
assert!(messages.is_empty());
assert!(rollback_messages.is_empty());
assert!(finished.is_empty());
assert!(finished_ids.is_empty());
}
#[tokio::test]
@@ -642,10 +663,14 @@ mod tests {
store.delete_procedure(procedure_id).await.unwrap();
let (messages, rollback_messages, finished) = store.load_messages().await.unwrap();
let ProcedureMessages {
messages,
rollback_messages,
finished_ids,
} = store.load_messages().await.unwrap();
assert!(messages.is_empty());
assert!(rollback_messages.is_empty());
assert!(finished.is_empty());
assert!(finished_ids.is_empty());
}
#[tokio::test]
@@ -705,10 +730,14 @@ mod tests {
.await
.unwrap();
let (messages, rollback_messages, finished) = store.load_messages().await.unwrap();
let ProcedureMessages {
messages,
rollback_messages,
finished_ids,
} = store.load_messages().await.unwrap();
assert_eq!(2, messages.len());
assert!(rollback_messages.is_empty());
assert_eq!(1, finished.len());
assert_eq!(1, finished_ids.len());
let msg = messages.get(&id0).unwrap();
assert_eq!("id0-2", msg.data);

View File

@@ -23,7 +23,6 @@ use datatypes::prelude::ConcreteDataType;
pub use expr::build_filter_from_timestamp;
pub use self::accumulator::{Accumulator, AggregateFunctionCreator, AggregateFunctionCreatorRef};
pub use self::expr::{DfExpr, Expr};
pub use self::udaf::AggregateFunction;
pub use self::udf::ScalarUdf;
use crate::function::{ReturnTypeFunction, ScalarFunctionImplementation};

View File

@@ -16,28 +16,9 @@ use common_time::range::TimestampRange;
use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
use datafusion_common::{Column, ScalarValue};
pub use datafusion_expr::expr::Expr as DfExpr;
use datafusion_expr::expr::Expr;
use datafusion_expr::{and, binary_expr, Operator};
/// Central struct of query API.
/// Represent logical expressions such as `A + 1`, or `CAST(c1 AS int)`.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub struct Expr {
df_expr: DfExpr,
}
impl Expr {
pub fn df_expr(&self) -> &DfExpr {
&self.df_expr
}
}
impl From<DfExpr> for Expr {
fn from(df_expr: DfExpr) -> Self {
Self { df_expr }
}
}
/// Builds an `Expr` that filters timestamp column from given timestamp range.
/// Returns [None] if time range is [None] or full time range.
pub fn build_filter_from_timestamp(
@@ -45,12 +26,12 @@ pub fn build_filter_from_timestamp(
time_range: Option<&TimestampRange>,
) -> Option<Expr> {
let time_range = time_range?;
let ts_col_expr = DfExpr::Column(Column {
let ts_col_expr = Expr::Column(Column {
relation: None,
name: ts_col_name.to_string(),
});
let df_expr = match (time_range.start(), time_range.end()) {
match (time_range.start(), time_range.end()) {
(None, None) => None,
(Some(start), None) => Some(binary_expr(
ts_col_expr,
@@ -70,20 +51,18 @@ pub fn build_filter_from_timestamp(
),
binary_expr(ts_col_expr, Operator::Lt, timestamp_to_literal(end)),
)),
};
df_expr.map(Expr::from)
}
}
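A hedged sketch of the simplified API after this change (the `range` value is an assumed pre-built `TimestampRange`; its construction is elided):

// Assuming `range` spans [start, end) on a seconds-resolution column "ts",
// the returned filter is equivalent to: ts >= start AND ts < end.
if let Some(filter) = build_filter_from_timestamp("ts", Some(&range)) {
    // `filter` is now a plain DataFusion `Expr`; no wrapper type to unwrap.
    let _ = filter;
}
// A missing or full time range yields no filter at all.
assert!(build_filter_from_timestamp("ts", None).is_none());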
/// Converts a [Timestamp] to datafusion literal value.
fn timestamp_to_literal(timestamp: &Timestamp) -> DfExpr {
fn timestamp_to_literal(timestamp: &Timestamp) -> Expr {
let scalar_value = match timestamp.unit() {
TimeUnit::Second => ScalarValue::TimestampSecond(Some(timestamp.value()), None),
TimeUnit::Millisecond => ScalarValue::TimestampMillisecond(Some(timestamp.value()), None),
TimeUnit::Microsecond => ScalarValue::TimestampMicrosecond(Some(timestamp.value()), None),
TimeUnit::Nanosecond => ScalarValue::TimestampNanosecond(Some(timestamp.value()), None),
};
DfExpr::Literal(scalar_value)
Expr::Literal(scalar_value)
}
#[cfg(test)]
@@ -91,11 +70,21 @@ mod tests {
use super::*;
#[test]
fn test_from_df_expr() {
let df_expr = DfExpr::Wildcard { qualifier: None };
fn test_timestamp_to_literal() {
let timestamp = Timestamp::new(123456789, TimeUnit::Second);
let expected = Expr::Literal(ScalarValue::TimestampSecond(Some(123456789), None));
assert_eq!(timestamp_to_literal(&timestamp), expected);
let expr: Expr = df_expr.into();
let timestamp = Timestamp::new(123456789, TimeUnit::Millisecond);
let expected = Expr::Literal(ScalarValue::TimestampMillisecond(Some(123456789), None));
assert_eq!(timestamp_to_literal(&timestamp), expected);
assert_eq!(DfExpr::Wildcard { qualifier: None }, *expr.df_expr());
let timestamp = Timestamp::new(123456789, TimeUnit::Microsecond);
let expected = Expr::Literal(ScalarValue::TimestampMicrosecond(Some(123456789), None));
assert_eq!(timestamp_to_literal(&timestamp), expected);
let timestamp = Timestamp::new(123456789, TimeUnit::Nanosecond);
let expected = Expr::Literal(ScalarValue::TimestampNanosecond(Some(123456789), None));
assert_eq!(timestamp_to_literal(&timestamp), expected);
}
}

View File

@@ -16,7 +16,7 @@ pub use datafusion_common::ScalarValue;
pub use crate::columnar_value::ColumnarValue;
pub use crate::function::*;
pub use crate::logical_plan::{create_udf, AggregateFunction, Expr, ScalarUdf};
pub use crate::logical_plan::{create_udf, AggregateFunction, ScalarUdf};
pub use crate::signature::{Signature, TypeSignature, Volatility};
/// Default timestamp column name for Prometheus metrics.

View File

@@ -14,7 +14,7 @@
//! Util record batch stream wrapper that can perform precise filter.
use datafusion::logical_expr::{Expr, Operator};
use datafusion::logical_expr::{Expr, Literal, Operator};
use datafusion_common::arrow::array::{ArrayRef, Datum, Scalar};
use datafusion_common::arrow::buffer::BooleanBuffer;
use datafusion_common::arrow::compute::kernels::cmp;
@@ -43,6 +43,28 @@ pub struct SimpleFilterEvaluator {
}
impl SimpleFilterEvaluator {
pub fn new<T: Literal>(column_name: String, lit: T, op: Operator) -> Option<Self> {
match op {
Operator::Eq
| Operator::NotEq
| Operator::Lt
| Operator::LtEq
| Operator::Gt
| Operator::GtEq => {}
_ => return None,
}
let Expr::Literal(val) = lit.lit() else {
return None;
};
Some(Self {
column_name,
literal: val.to_scalar().ok()?,
op,
})
}
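A hedged usage sketch of the new constructor, assuming DataFusion's `Literal` impl for `&str` (the literal is turned into an `Expr::Literal` via `lit()` internally):

// Comparison operators are accepted...
let eval = SimpleFilterEvaluator::new("host".to_string(), "web-01", Operator::Eq);
assert!(eval.is_some());
// ...anything else is rejected up front.
let none = SimpleFilterEvaluator::new("host".to_string(), "web-01", Operator::Plus);
assert!(none.is_none());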
pub fn try_new(predicate: &Expr) -> Option<Self> {
match predicate {
Expr::BinaryExpr(binary) => {

View File

@@ -124,139 +124,144 @@ pub fn init_global_logging(
tracing_opts: &TracingOptions,
node_id: Option<String>,
) -> Vec<WorkerGuard> {
static START: Once = Once::new();
let mut guards = vec![];
let dir = &opts.dir;
let level = &opts.level;
let enable_otlp_tracing = opts.enable_otlp_tracing;
// Enable log compatible layer to convert log record to tracing span.
LogTracer::init().expect("log tracer must be valid");
START.call_once(|| {
let dir = &opts.dir;
let level = &opts.level;
let enable_otlp_tracing = opts.enable_otlp_tracing;
// stdout log layer.
let stdout_logging_layer = if opts.append_stdout {
let (stdout_writer, stdout_guard) = tracing_appender::non_blocking(std::io::stdout());
guards.push(stdout_guard);
// Enable log compatible layer to convert log record to tracing span.
LogTracer::init().expect("log tracer must be valid");
Some(
Layer::new()
.with_writer(stdout_writer)
.with_ansi(atty::is(atty::Stream::Stdout)),
)
} else {
None
};
// file log layer.
let rolling_appender = RollingFileAppender::new(Rotation::HOURLY, dir, app_name);
let (rolling_writer, rolling_writer_guard) = tracing_appender::non_blocking(rolling_appender);
let file_logging_layer = Layer::new().with_writer(rolling_writer).with_ansi(false);
guards.push(rolling_writer_guard);
// error file log layer.
let err_rolling_appender =
RollingFileAppender::new(Rotation::HOURLY, dir, format!("{}-{}", app_name, "err"));
let (err_rolling_writer, err_rolling_writer_guard) =
tracing_appender::non_blocking(err_rolling_appender);
let err_file_logging_layer = Layer::new()
.with_writer(err_rolling_writer)
.with_ansi(false);
guards.push(err_rolling_writer_guard);
// resolve log level settings from:
// - options from command line or config files
// - environment variable: RUST_LOG
// - default settings
let rust_log_env = std::env::var(EnvFilter::DEFAULT_ENV).ok();
let targets_string = level
.as_deref()
.or(rust_log_env.as_deref())
.unwrap_or(DEFAULT_LOG_TARGETS);
let filter = targets_string
.parse::<filter::Targets>()
.expect("error parsing log level string");
let sampler = opts
.tracing_sample_ratio
.as_ref()
.map(create_sampler)
.map(Sampler::ParentBased)
.unwrap_or(Sampler::ParentBased(Box::new(Sampler::AlwaysOn)));
// Must enable 'tokio_unstable' cfg to use this feature.
// For example: `RUSTFLAGS="--cfg tokio_unstable" cargo run -F common-telemetry/console -- standalone start`
#[cfg(feature = "tokio-console")]
let subscriber = {
let tokio_console_layer = if let Some(tokio_console_addr) = &tracing_opts.tokio_console_addr
{
let addr: std::net::SocketAddr = tokio_console_addr.parse().unwrap_or_else(|e| {
panic!("Invalid binding address '{tokio_console_addr}' for tokio-console: {e}");
});
println!("tokio-console listening on {addr}");
// stdout log layer.
let stdout_logging_layer = if opts.append_stdout {
let (stdout_writer, stdout_guard) = tracing_appender::non_blocking(std::io::stdout());
guards.push(stdout_guard);
Some(
console_subscriber::ConsoleLayer::builder()
.server_addr(addr)
.spawn(),
Layer::new()
.with_writer(stdout_writer)
.with_ansi(atty::is(atty::Stream::Stdout)),
)
} else {
None
};
let stdout_logging_layer = stdout_logging_layer.map(|x| x.with_filter(filter.clone()));
// file log layer.
let rolling_appender = RollingFileAppender::new(Rotation::HOURLY, dir, app_name);
let (rolling_writer, rolling_writer_guard) =
tracing_appender::non_blocking(rolling_appender);
let file_logging_layer = Layer::new().with_writer(rolling_writer).with_ansi(false);
guards.push(rolling_writer_guard);
let file_logging_layer = file_logging_layer.with_filter(filter);
// error file log layer.
let err_rolling_appender =
RollingFileAppender::new(Rotation::HOURLY, dir, format!("{}-{}", app_name, "err"));
let (err_rolling_writer, err_rolling_writer_guard) =
tracing_appender::non_blocking(err_rolling_appender);
let err_file_logging_layer = Layer::new()
.with_writer(err_rolling_writer)
.with_ansi(false);
guards.push(err_rolling_writer_guard);
Registry::default()
.with(tokio_console_layer)
// resolve log level settings from:
// - options from command line or config files
// - environment variable: RUST_LOG
// - default settings
let rust_log_env = std::env::var(EnvFilter::DEFAULT_ENV).ok();
let targets_string = level
.as_deref()
.or(rust_log_env.as_deref())
.unwrap_or(DEFAULT_LOG_TARGETS);
let filter = targets_string
.parse::<filter::Targets>()
.expect("error parsing log level string");
let sampler = opts
.tracing_sample_ratio
.as_ref()
.map(create_sampler)
.map(Sampler::ParentBased)
.unwrap_or(Sampler::ParentBased(Box::new(Sampler::AlwaysOn)));
// Must enable 'tokio_unstable' cfg to use this feature.
// For example: `RUSTFLAGS="--cfg tokio_unstable" cargo run -F common-telemetry/console -- standalone start`
#[cfg(feature = "tokio-console")]
let subscriber = {
let tokio_console_layer =
if let Some(tokio_console_addr) = &tracing_opts.tokio_console_addr {
let addr: std::net::SocketAddr = tokio_console_addr.parse().unwrap_or_else(|e| {
panic!("Invalid binding address '{tokio_console_addr}' for tokio-console: {e}");
});
println!("tokio-console listening on {addr}");
Some(
console_subscriber::ConsoleLayer::builder()
.server_addr(addr)
.spawn(),
)
} else {
None
};
let stdout_logging_layer = stdout_logging_layer.map(|x| x.with_filter(filter.clone()));
let file_logging_layer = file_logging_layer.with_filter(filter);
Registry::default()
.with(tokio_console_layer)
.with(stdout_logging_layer)
.with(file_logging_layer)
.with(err_file_logging_layer.with_filter(filter::LevelFilter::ERROR))
};
// consume the `tracing_opts`, to avoid "unused" warnings
let _ = tracing_opts;
#[cfg(not(feature = "tokio-console"))]
let subscriber = Registry::default()
.with(filter)
.with(stdout_logging_layer)
.with(file_logging_layer)
.with(err_file_logging_layer.with_filter(filter::LevelFilter::ERROR))
};
.with(err_file_logging_layer.with_filter(filter::LevelFilter::ERROR));
// consume the `tracing_opts`, to avoid "unused" warnings
let _ = tracing_opts;
#[cfg(not(feature = "tokio-console"))]
let subscriber = Registry::default()
.with(filter)
.with(stdout_logging_layer)
.with(file_logging_layer)
.with(err_file_logging_layer.with_filter(filter::LevelFilter::ERROR));
if enable_otlp_tracing {
global::set_text_map_propagator(TraceContextPropagator::new());
// otlp exporter
let tracer = opentelemetry_otlp::new_pipeline()
.tracing()
.with_exporter(
opentelemetry_otlp::new_exporter().tonic().with_endpoint(
opts.otlp_endpoint
.as_ref()
.map(|e| format!("http://{}", e))
.unwrap_or(DEFAULT_OTLP_ENDPOINT.to_string()),
),
)
.with_trace_config(
opentelemetry_sdk::trace::config()
.with_sampler(sampler)
.with_resource(opentelemetry_sdk::Resource::new(vec![
KeyValue::new(resource::SERVICE_NAME, app_name.to_string()),
KeyValue::new(
resource::SERVICE_INSTANCE_ID,
node_id.unwrap_or("none".to_string()),
),
KeyValue::new(resource::SERVICE_VERSION, env!("CARGO_PKG_VERSION")),
KeyValue::new(resource::PROCESS_PID, std::process::id().to_string()),
])),
)
.install_batch(opentelemetry_sdk::runtime::Tokio)
.expect("otlp tracer install failed");
let tracing_layer = Some(tracing_opentelemetry::layer().with_tracer(tracer));
let subscriber = subscriber.with(tracing_layer);
tracing::subscriber::set_global_default(subscriber)
.expect("error setting global tracing subscriber");
} else {
tracing::subscriber::set_global_default(subscriber)
.expect("error setting global tracing subscriber");
}
if enable_otlp_tracing {
global::set_text_map_propagator(TraceContextPropagator::new());
// otlp exporter
let tracer = opentelemetry_otlp::new_pipeline()
.tracing()
.with_exporter(
opentelemetry_otlp::new_exporter().tonic().with_endpoint(
opts.otlp_endpoint
.as_ref()
.map(|e| format!("http://{}", e))
.unwrap_or(DEFAULT_OTLP_ENDPOINT.to_string()),
),
)
.with_trace_config(
opentelemetry_sdk::trace::config()
.with_sampler(sampler)
.with_resource(opentelemetry_sdk::Resource::new(vec![
KeyValue::new(resource::SERVICE_NAME, app_name.to_string()),
KeyValue::new(
resource::SERVICE_INSTANCE_ID,
node_id.unwrap_or("none".to_string()),
),
KeyValue::new(resource::SERVICE_VERSION, env!("CARGO_PKG_VERSION")),
KeyValue::new(resource::PROCESS_PID, std::process::id().to_string()),
])),
)
.install_batch(opentelemetry_sdk::runtime::Tokio)
.expect("otlp tracer install failed");
let tracing_layer = Some(tracing_opentelemetry::layer().with_tracer(tracer));
let subscriber = subscriber.with(tracing_layer);
tracing::subscriber::set_global_default(subscriber)
.expect("error setting global tracing subscriber");
} else {
tracing::subscriber::set_global_default(subscriber)
.expect("error setting global tracing subscriber");
}
});
guards
}
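The restructuring above wraps the whole initialization in `std::sync::Once`; a minimal sketch of the guard pattern (the function name here is hypothetical):

use std::sync::Once;

static START: Once = Once::new();

fn init_logging_once() {
    START.call_once(|| {
        // Build the subscriber and install it as the global default exactly
        // once; a repeated call becomes a no-op instead of panicking inside
        // `set_global_default` or `LogTracer::init`.
    });
}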

View File

@@ -367,6 +367,19 @@ pub enum Error {
#[snafu(source(from(common_config::error::Error, Box::new)))]
source: Box<common_config::error::Error>,
},
#[snafu(display(
"Failed to get region metadata from engine {} for region_id {}",
engine,
region_id,
))]
GetRegionMetadata {
engine: String,
region_id: RegionId,
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
}
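A hedged sketch of raising the new variant through snafu's generated context selector; `engine_name` and the surrounding handler are assumptions, and `snafu::ResultExt` is assumed in scope. `get_metadata` already returns `Result<_, BoxedError>`, so the source slots in directly:

let metadata = engine
    .get_metadata(region_id)
    .await
    .context(GetRegionMetadataSnafu {
        engine: engine_name.to_string(),
        region_id,
    })?;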
pub type Result<T> = std::result::Result<T, Error>;
@@ -433,7 +446,9 @@ impl ErrorExt for Error {
TableIdProviderNotFound { .. } | UnsupportedGrpcRequest { .. } => {
StatusCode::Unsupported
}
HandleRegionRequest { source, .. } => source.status_code(),
HandleRegionRequest { source, .. } | GetRegionMetadata { source, .. } => {
source.status_code()
}
StopRegionEngine { source, .. } => source.status_code(),
FindLogicalRegions { source, .. } => source.status_code(),

View File

@@ -109,13 +109,10 @@ impl HeartbeatTask {
let mut last_received_lease = Instant::now();
let _handle = common_runtime::spawn_bg(async move {
while let Some(res) = match rx.message().await {
Ok(m) => m,
Err(e) => {
error!(e; "Error while reading heartbeat response");
None
}
} {
while let Some(res) = rx.message().await.unwrap_or_else(|e| {
error!(e; "Error while reading heartbeat response");
None
}) {
if let Some(msg) = res.mailbox_message.as_ref() {
info!("Received mailbox message: {msg:?}, meta_client id: {client_id:?}");
}
@@ -238,7 +235,7 @@ impl HeartbeatTask {
Some(req)
}
Err(e) => {
error!(e;"Failed to encode mailbox messages!");
error!(e; "Failed to encode mailbox messages!");
None
}
}
@@ -276,7 +273,7 @@ impl HeartbeatTask {
if let Some(req) = req {
debug!("Sending heartbeat request: {:?}", req);
if let Err(e) = tx.send(req).await {
error!("Failed to send heartbeat to metasrv, error: {:?}", e);
error!(e; "Failed to send heartbeat to metasrv");
match Self::create_streams(
&meta_client,
running.clone(),
@@ -301,7 +298,7 @@ impl HeartbeatTask {
Instant::now()
+ Duration::from_secs(META_KEEP_ALIVE_INTERVAL_SECS),
);
error!(e;"Failed to reconnect to metasrv!");
error!(e; "Failed to reconnect to metasrv!");
}
}
}

View File

@@ -219,6 +219,7 @@ impl RegionServerHandler for RegionServer {
.context(BuildRegionRequestsSnafu)
.map_err(BoxedError::new)
.context(ExecuteGrpcRequestSnafu)?;
let tracing_context = TracingContext::from_current_span();
let results = if is_parallel {

View File

@@ -21,6 +21,7 @@ use servers::grpc::{GrpcServer, GrpcServerConfig};
use servers::http::HttpServerBuilder;
use servers::metrics_handler::MetricsHandler;
use servers::server::{ServerHandler, ServerHandlers};
use servers::tls::TlsOption;
use snafu::ResultExt;
use crate::config::DatanodeOptions;
@@ -95,6 +96,7 @@ impl<'a> DatanodeServiceBuilder<'a> {
let config = GrpcServerConfig {
max_recv_message_size: opts.rpc_max_recv_message_size.as_bytes() as usize,
max_send_message_size: opts.rpc_max_send_message_size.as_bytes() as usize,
tls: TlsOption::default(),
};
GrpcServerBuilder::new(config, region_server.runtime())

View File

@@ -23,7 +23,6 @@ use common_function::function::FunctionRef;
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
use common_query::prelude::ScalarUdf;
use common_query::Output;
use common_recordbatch::SendableRecordBatchStream;
use common_runtime::Runtime;
use query::dataframe::DataFrame;
use query::plan::LogicalPlan;
@@ -32,7 +31,7 @@ use query::query_engine::DescribeResult;
use query::{QueryEngine, QueryEngineContext};
use session::context::QueryContextRef;
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{RegionEngine, RegionRole, SetReadonlyResponse};
use store_api::region_engine::{RegionEngine, RegionRole, RegionScannerRef, SetReadonlyResponse};
use store_api::region_request::{AffectedRows, RegionRequest};
use store_api::storage::{RegionId, ScanRequest};
use table::TableRef;
@@ -193,7 +192,7 @@ impl RegionEngine for MockRegionEngine {
&self,
_region_id: RegionId,
_request: ScanRequest,
) -> Result<SendableRecordBatchStream, BoxedError> {
) -> Result<RegionScannerRef, BoxedError> {
unimplemented!()
}

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub type BinaryArray = arrow::array::LargeBinaryArray;
pub type MutableBinaryArray = arrow::array::LargeBinaryBuilder;
pub type BinaryArray = arrow::array::BinaryArray;
pub type MutableBinaryArray = arrow::array::BinaryBuilder;
pub type StringArray = arrow::array::StringArray;
pub type MutableStringArray = arrow::array::StringBuilder;
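The alias switch from `LargeBinaryArray` to `BinaryArray` narrows the Arrow offset width from `i64` to `i32`; a sketch of the distinction at the arrow-rs level:

use arrow::array::{BinaryArray, LargeBinaryArray};

// BinaryArray stores offsets as i32 (capping total value bytes near 2 GiB)...
let small = BinaryArray::from_vec(vec!["abc".as_bytes()]);
// ...while LargeBinaryArray uses i64 offsets.
let large = LargeBinaryArray::from_vec(vec!["abc".as_bytes()]);
assert_eq!(small.value(0), large.value(0));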

View File

@@ -47,7 +47,7 @@ impl DataType for BinaryType {
}
fn as_arrow_type(&self) -> ArrowDataType {
ArrowDataType::LargeBinary
ArrowDataType::Binary
}
fn create_mutable_vector(&self, capacity: usize) -> Box<dyn MutableVector> {

View File

@@ -342,7 +342,7 @@ impl Value {
Value::Float32(v) => ScalarValue::Float32(Some(v.0)),
Value::Float64(v) => ScalarValue::Float64(Some(v.0)),
Value::String(v) => ScalarValue::Utf8(Some(v.as_utf8().to_string())),
Value::Binary(v) => ScalarValue::LargeBinary(Some(v.to_vec())),
Value::Binary(v) => ScalarValue::Binary(Some(v.to_vec())),
Value::Date(v) => ScalarValue::Date32(Some(v.val())),
Value::DateTime(v) => ScalarValue::Date64(Some(v.val())),
Value::Null => to_null_scalar_value(output_type)?,
@@ -413,7 +413,7 @@ pub fn to_null_scalar_value(output_type: &ConcreteDataType) -> Result<ScalarValu
ConcreteDataType::UInt64(_) => ScalarValue::UInt64(None),
ConcreteDataType::Float32(_) => ScalarValue::Float32(None),
ConcreteDataType::Float64(_) => ScalarValue::Float64(None),
ConcreteDataType::Binary(_) => ScalarValue::LargeBinary(None),
ConcreteDataType::Binary(_) => ScalarValue::Binary(None),
ConcreteDataType::String(_) => ScalarValue::Utf8(None),
ConcreteDataType::Date(_) => ScalarValue::Date32(None),
ConcreteDataType::DateTime(_) => ScalarValue::Date64(None),
@@ -2105,7 +2105,7 @@ mod tests {
.unwrap()
);
assert_eq!(
ScalarValue::LargeBinary(Some("world".as_bytes().to_vec())),
ScalarValue::Binary(Some("world".as_bytes().to_vec())),
Value::Binary(Bytes::from("world".as_bytes()))
.try_to_scalar_value(&ConcreteDataType::binary_datatype())
.unwrap()
@@ -2187,7 +2187,7 @@ mod tests {
.unwrap()
);
assert_eq!(
ScalarValue::LargeBinary(None),
ScalarValue::Binary(None),
Value::Null
.try_to_scalar_value(&ConcreteDataType::binary_datatype())
.unwrap()

View File

@@ -52,6 +52,14 @@ impl From<Vec<Option<Vec<u8>>>> for BinaryVector {
}
}
impl From<Vec<&[u8]>> for BinaryVector {
fn from(data: Vec<&[u8]>) -> Self {
Self {
array: BinaryArray::from_iter_values(data),
}
}
}
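A short usage sketch of the new `From<Vec<&[u8]>>` conversion, mirroring the assertions in the tests below:

// Values are copied into a non-large BinaryArray (i32 offsets).
let v = BinaryVector::from(vec!["hello".as_bytes(), "world".as_bytes()]);
assert_eq!(2, v.len());
assert_eq!(&ArrowDataType::Binary, v.to_arrow_array().data_type());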
impl Vector for BinaryVector {
fn data_type(&self) -> ConcreteDataType {
ConcreteDataType::binary_datatype()
@@ -257,7 +265,7 @@ mod tests {
let arrow_arr = v.to_arrow_array();
assert_eq!(2, arrow_arr.len());
assert_eq!(&ArrowDataType::LargeBinary, arrow_arr.data_type());
assert_eq!(&ArrowDataType::Binary, arrow_arr.data_type());
}
#[test]

View File

@@ -258,9 +258,9 @@ impl Helper {
Ok(match array.as_ref().data_type() {
ArrowDataType::Null => Arc::new(NullVector::try_from_arrow_array(array)?),
ArrowDataType::Boolean => Arc::new(BooleanVector::try_from_arrow_array(array)?),
ArrowDataType::LargeBinary => Arc::new(BinaryVector::try_from_arrow_array(array)?),
ArrowDataType::FixedSizeBinary(_) | ArrowDataType::Binary => {
let array = arrow::compute::cast(array.as_ref(), &ArrowDataType::LargeBinary)
ArrowDataType::Binary => Arc::new(BinaryVector::try_from_arrow_array(array)?),
ArrowDataType::LargeBinary | ArrowDataType::FixedSizeBinary(_) => {
let array = arrow::compute::cast(array.as_ref(), &ArrowDataType::Binary)
.context(crate::error::ArrowComputeSnafu)?;
Arc::new(BinaryVector::try_from_arrow_array(array)?)
}
@@ -278,7 +278,7 @@ impl Helper {
ArrowDataType::LargeUtf8 => {
let array = arrow::compute::cast(array.as_ref(), &ArrowDataType::Utf8)
.context(crate::error::ArrowComputeSnafu)?;
Arc::new(BinaryVector::try_from_arrow_array(array)?)
Arc::new(StringVector::try_from_arrow_array(array)?)
}
ArrowDataType::Date32 => Arc::new(DateVector::try_from_arrow_array(array)?),
ArrowDataType::Date64 => Arc::new(DateTimeVector::try_from_arrow_array(array)?),
@@ -402,8 +402,10 @@ mod tests {
TimestampMicrosecondArray, TimestampMillisecondArray, TimestampNanosecondArray,
TimestampSecondArray, UInt16Array, UInt32Array, UInt64Array, UInt8Array,
};
use arrow::buffer::Buffer;
use arrow::datatypes::Int32Type;
use arrow_array::DictionaryArray;
use arrow_array::{BinaryArray, DictionaryArray, FixedSizeBinaryArray, LargeStringArray};
use arrow_schema::DataType;
use common_decimal::Decimal128;
use common_time::time::Time;
use common_time::timestamp::TimeUnit;
@@ -576,10 +578,6 @@ mod tests {
fn test_try_into_vector() {
check_try_into_vector(NullArray::new(2));
check_try_into_vector(BooleanArray::from(vec![true, false]));
check_try_into_vector(LargeBinaryArray::from(vec![
"hello".as_bytes(),
"world".as_bytes(),
]));
check_try_into_vector(Int8Array::from(vec![1, 2, 3]));
check_try_into_vector(Int16Array::from(vec![1, 2, 3]));
check_try_into_vector(Int32Array::from(vec![1, 2, 3]));
@@ -611,6 +609,52 @@ mod tests {
Helper::try_into_vector(array).unwrap_err();
}
#[test]
fn test_try_binary_array_into_vector() {
let input_vec: Vec<&[u8]> = vec!["hello".as_bytes(), "world".as_bytes()];
let assertion_vector = BinaryVector::from(input_vec.clone());
let input_arrays: Vec<ArrayRef> = vec![
Arc::new(LargeBinaryArray::from(input_vec.clone())) as ArrayRef,
Arc::new(BinaryArray::from(input_vec.clone())) as ArrayRef,
Arc::new(FixedSizeBinaryArray::new(
5,
Buffer::from_vec("helloworld".as_bytes().to_vec()),
None,
)) as ArrayRef,
];
for input_array in input_arrays {
let vector = Helper::try_into_vector(input_array).unwrap();
assert_eq!(2, vector.len());
assert_eq!(0, vector.null_count());
let output_arrow_array: ArrayRef = vector.to_arrow_array();
assert_eq!(&DataType::Binary, output_arrow_array.data_type());
assert_eq!(&assertion_vector.to_arrow_array(), &output_arrow_array);
}
}
#[test]
fn test_large_string_array_into_vector() {
let input_vec = vec!["a", "b"];
let assertion_array = StringArray::from(input_vec.clone());
let large_string_array: ArrayRef = Arc::new(LargeStringArray::from(input_vec));
let vector = Helper::try_into_vector(large_string_array).unwrap();
assert_eq!(2, vector.len());
assert_eq!(0, vector.null_count());
let output_arrow_array: StringArray = vector
.to_arrow_array()
.as_any()
.downcast_ref::<StringArray>()
.unwrap()
.clone();
assert_eq!(&assertion_array, &output_arrow_array);
}
#[test]
fn test_try_from_scalar_time_value() {
let vector = Helper::try_from_scalar_value(ScalarValue::Time32Second(Some(42)), 3).unwrap();

View File

@@ -25,7 +25,9 @@ use common_telemetry::{error, info};
use object_store::ObjectStore;
use snafu::{ensure, OptionExt};
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{RegionEngine, RegionRole, SetReadonlyResponse};
use store_api::region_engine::{
RegionEngine, RegionRole, RegionScannerRef, SetReadonlyResponse, SinglePartitionScanner,
};
use store_api::region_request::{
AffectedRows, RegionCloseRequest, RegionCreateRequest, RegionDropRequest, RegionOpenRequest,
RegionRequest,
@@ -49,6 +51,20 @@ impl FileRegionEngine {
inner: Arc::new(EngineInner::new(object_store)),
}
}
async fn handle_query(
&self,
region_id: RegionId,
request: ScanRequest,
) -> Result<SendableRecordBatchStream, BoxedError> {
self.inner
.get_region(region_id)
.await
.context(RegionNotFoundSnafu { region_id })
.map_err(BoxedError::new)?
.query(request)
.map_err(BoxedError::new)
}
}
#[async_trait]
@@ -72,14 +88,10 @@ impl RegionEngine for FileRegionEngine {
&self,
region_id: RegionId,
request: ScanRequest,
) -> Result<SendableRecordBatchStream, BoxedError> {
self.inner
.get_region(region_id)
.await
.context(RegionNotFoundSnafu { region_id })
.map_err(BoxedError::new)?
.query(request)
.map_err(BoxedError::new)
) -> Result<RegionScannerRef, BoxedError> {
let stream = self.handle_query(region_id, request).await?;
let scanner = Arc::new(SinglePartitionScanner::new(stream));
Ok(scanner)
}
async fn get_metadata(&self, region_id: RegionId) -> Result<RegionMetadataRef, BoxedError> {

View File

@@ -21,11 +21,11 @@ use std::task::{Context, Poll};
use common_datasource::object_store::build_backend;
use common_error::ext::BoxedError;
use common_query::prelude::Expr;
use common_recordbatch::adapter::RecordBatchMetrics;
use common_recordbatch::error::{CastVectorSnafu, ExternalSnafu, Result as RecordBatchResult};
use common_recordbatch::{OrderOption, RecordBatch, RecordBatchStream, SendableRecordBatchStream};
use datafusion::logical_expr::utils as df_logical_expr_utils;
use datafusion_expr::expr::Expr;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::VectorRef;
@@ -113,7 +113,7 @@ impl FileRegion {
let mut aux_column_set = HashSet::new();
for scan_filter in scan_filters {
df_logical_expr_utils::expr_to_columns(scan_filter.df_expr(), &mut aux_column_set)
df_logical_expr_utils::expr_to_columns(scan_filter, &mut aux_column_set)
.context(ExtractColumnFromFilterSnafu)?;
let all_file_columns = aux_column_set


@@ -19,7 +19,6 @@ use common_datasource::file_format::json::{JsonFormat, JsonOpener};
use common_datasource::file_format::orc::{OrcFormat, OrcOpener};
use common_datasource::file_format::parquet::{DefaultParquetFileReaderFactory, ParquetFormat};
use common_datasource::file_format::Format;
use common_query::prelude::Expr;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::SendableRecordBatchStream;
use datafusion::common::{Statistics, ToDFSchema};
@@ -32,6 +31,7 @@ use datafusion::physical_expr::execution_props::ExecutionProps;
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use datafusion::physical_plan::ExecutionPlan;
use datafusion::prelude::SessionContext;
use datafusion_expr::expr::Expr;
use datafusion_expr::utils::conjunction;
use datatypes::arrow::datatypes::Schema as ArrowSchema;
use datatypes::schema::SchemaRef;
@@ -182,10 +182,7 @@ fn new_parquet_stream_with_exec_plan(
};
// build predicate filter
let filters = filters
.iter()
.map(|f| f.df_expr().clone())
.collect::<Vec<_>>();
let filters = filters.to_vec();
let filters = if let Some(expr) = conjunction(filters) {
let df_schema = file_schema
.clone()


@@ -26,6 +26,7 @@ use common_base::Plugins;
use common_error::ext::BoxedError;
use common_frontend::handler::FrontendInvoker;
use common_meta::key::TableMetadataManagerRef;
use common_query::prelude::GREPTIME_TIMESTAMP;
use common_runtime::JoinHandle;
use common_telemetry::{debug, info};
use datatypes::schema::ColumnSchema;
@@ -42,7 +43,6 @@ use tokio::sync::{oneshot, watch, Mutex, RwLock};
use crate::adapter::error::{ExternalSnafu, TableNotFoundSnafu, UnexpectedSnafu};
pub(crate) use crate::adapter::node_context::FlownodeContext;
use crate::adapter::parse_expr::parse_fixed;
use crate::adapter::table_source::TableSource;
use crate::adapter::util::column_schemas_to_proto;
use crate::adapter::worker::{create_worker, Worker, WorkerHandle};
@@ -282,7 +282,7 @@ impl FlownodeManager {
let schema = meta.schema.column_schemas;
let is_auto_create = schema
.last()
.map(|s| s.name == "__ts_placeholder")
.map(|s| s.name == GREPTIME_TIMESTAMP)
.unwrap_or(false);
(primary_keys, schema, is_auto_create)
} else {
@@ -303,6 +303,7 @@ impl FlownodeManager {
.clone();
// TODO(discord9): use default key from schema
let primary_keys = schema
.typ()
.keys
.first()
.map(|v| {
@@ -319,18 +320,25 @@ impl FlownodeManager {
);
// TODO(discord9): currently bugged: we can't infer the time index from the flow plan, so we have to set one manually
let ts_col = ColumnSchema::new(
"__ts_placeholder",
GREPTIME_TIMESTAMP,
ConcreteDataType::timestamp_millisecond_datatype(),
true,
)
.with_time_index(true);
let wout_ts = schema
.typ()
.column_types
.clone()
.into_iter()
.enumerate()
.map(|(idx, typ)| {
ColumnSchema::new(format!("Col_{idx}"), typ.scalar_type, typ.nullable)
let name = schema
.names
.get(idx)
.cloned()
.unwrap_or(format!("Col_{}", idx));
ColumnSchema::new(name, typ.scalar_type, typ.nullable)
})
.collect_vec();
@@ -505,26 +513,26 @@ impl FlownodeManager {
/// However, this is not blocking and can sometimes return while the actual computation is still running in the worker thread
/// TODO(discord9): add a flag for subgraphs that have received input since the last run
pub async fn run_available(&self) -> Result<(), Error> {
let now = self.tick_manager.tick();
loop {
let now = self.tick_manager.tick();
for worker in self.worker_handles.iter() {
// TODO(discord9): consider how to handle error in individual worker
worker.lock().await.run_available(now).await.unwrap();
}
// first check how many inputs were sent
let send_cnt = match self.node_context.lock().await.flush_all_sender() {
Ok(cnt) => cnt,
match self.node_context.lock().await.flush_all_sender() {
Ok(_) => (),
Err(err) => {
common_telemetry::error!("Flush send buf errors: {:?}", err);
break;
}
};
// if no inputs
if send_cnt == 0 {
// if nothing is left in the send buf then break
let buf_len = self.node_context.lock().await.get_send_buf_size();
if buf_len == 0 {
break;
} else {
debug!("FlownodeManager::run_available: send_cnt={}", send_cnt);
debug!("Send buf len = {}", buf_len);
}
}
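The key behavioral change in `run_available`: the loop now ticks on every iteration and exits only once the send buffers are empty, not when a single round sent zero rows — a full broadcast channel can leave rows buffered even though nothing was sent. A std-only sketch of that control flow, with a hypothetical reduction of `SourceSender`:

```rust
use std::collections::VecDeque;

/// Hypothetical reduction of a source sender: a bounded channel plus an
/// overflow buffer for rows that didn't fit.
struct SourceSender {
    send_buf: VecDeque<u64>,
    channel_capacity: usize,
}

impl SourceSender {
    /// Flush as many buffered rows as the channel will take this round.
    fn try_send_all(&mut self) -> usize {
        let n = self.send_buf.len().min(self.channel_capacity);
        self.send_buf.drain(..n).count()
    }
}

fn run_available(senders: &mut [SourceSender]) {
    loop {
        // ... tick and run the dataflow workers here ...
        let _sent: usize = senders.iter_mut().map(SourceSender::try_send_all).sum();
        // Key change: break only once every send buffer is empty, not when
        // a single round happened to send zero rows.
        let buf_len: usize = senders.iter().map(|s| s.send_buf.len()).sum();
        if buf_len == 0 {
            break;
        }
    }
}

fn main() {
    let mut senders = vec![SourceSender {
        send_buf: (0..10).collect(),
        channel_capacity: 3,
    }];
    run_available(&mut senders);
    assert!(senders[0].send_buf.is_empty());
}
```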
@@ -544,6 +552,8 @@ impl FlownodeManager {
);
let table_id = region_id.table_id();
self.node_context.lock().await.send(table_id, rows)?;
// TODO(discord9): put it in a background task?
self.run_available().await?;
Ok(())
}
}
@@ -565,7 +575,7 @@ impl FlownodeManager {
/// Return the task id if a new task is created, otherwise return None
///
/// steps to create a task:
/// 1. parse query into typed plan(and optional parse expire_when expr)
/// 1. parse the query into a typed plan (and optionally parse the expire_after expr)
/// 2. render source/sink with the output table id and the used input table ids
#[allow(clippy::too_many_arguments)]
pub async fn create_flow(
@@ -573,14 +583,14 @@ impl FlownodeManager {
flow_id: FlowId,
sink_table_name: TableName,
source_table_ids: &[TableId],
create_if_not_exist: bool,
expire_when: Option<String>,
create_if_not_exists: bool,
expire_after: Option<i64>,
comment: Option<String>,
sql: String,
flow_options: HashMap<String, String>,
query_ctx: Option<QueryContext>,
) -> Result<Option<FlowId>, Error> {
if create_if_not_exist {
if create_if_not_exists {
// check if the task already exists
for handle in self.worker_handles.iter() {
if handle.lock().await.contains_flow(flow_id).await? {
@@ -608,22 +618,6 @@ impl FlownodeManager {
debug!("Flow {:?}'s Plan is {:?}", flow_id, flow_plan);
node_ctx.assign_table_schema(&sink_table_name, flow_plan.typ.clone())?;
let expire_when = expire_when
.and_then(|s| {
if s.is_empty() || s.split_whitespace().join("").is_empty() {
None
} else {
Some(s)
}
})
.map(|d| {
let d = d.as_ref();
parse_fixed(d)
.map(|(_, n)| n)
.map_err(|err| err.to_string())
})
.transpose()
.map_err(|err| UnexpectedSnafu { reason: err }.build())?;
let _ = comment;
let _ = flow_options;
@@ -656,8 +650,8 @@ impl FlownodeManager {
sink_sender,
source_ids,
src_recvs: source_receivers,
expire_when,
create_if_not_exist,
expire_after,
create_if_not_exists,
err_collector,
};
handle.create_flow(create_request).await?;
@@ -670,21 +664,20 @@ impl FlownodeManager {
///
/// TODO(discord9): find a better way to do this that doesn't expose the flow tick event to other flows,
/// to avoid TSO coordination mess
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct FlowTickManager {
start: Instant,
}
impl std::fmt::Debug for FlowTickManager {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("FlowTickManager").finish()
}
start_timestamp: repr::Timestamp,
}
impl FlowTickManager {
pub fn new() -> Self {
FlowTickManager {
start: Instant::now(),
start_timestamp: SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_millis() as repr::Timestamp,
}
}
@@ -694,6 +687,6 @@ impl FlowTickManager {
pub fn tick(&self) -> repr::Timestamp {
let current = Instant::now();
let since_the_epoch = current - self.start;
since_the_epoch.as_millis() as repr::Timestamp
since_the_epoch.as_millis() as repr::Timestamp + self.start_timestamp
}
}

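The `FlowTickManager` change deserves a note: ticks remain monotonic thanks to `Instant`, but are now anchored to the Unix epoch so they compare meaningfully against event timestamps in the data. A condensed, runnable version, with a plain `i64` standing in for `repr::Timestamp`:

```rust
use std::time::{Instant, SystemTime, UNIX_EPOCH};

type Timestamp = i64; // milliseconds; stands in for repr::Timestamp

struct FlowTickManager {
    /// Monotonic base: guarantees tick() never goes backwards.
    start: Instant,
    /// Epoch anchor taken once at startup, in milliseconds.
    start_timestamp: Timestamp,
}

impl FlowTickManager {
    fn new() -> Self {
        Self {
            start: Instant::now(),
            start_timestamp: SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap()
                .as_millis() as Timestamp,
        }
    }

    /// Monotonic elapsed time plus the epoch anchor: a real wall-clock
    /// millisecond timestamp that still cannot jump backwards.
    fn tick(&self) -> Timestamp {
        (Instant::now() - self.start).as_millis() as Timestamp + self.start_timestamp
    }
}

fn main() {
    let clock = FlowTickManager::new();
    let (a, b) = (clock.tick(), clock.tick());
    assert!(b >= a);
}
```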

@@ -14,13 +14,17 @@
//! impl the `Flownode` trait for `FlownodeManager` so standalone mode can call them
use std::collections::HashMap;
use api::v1::flow::{flow_request, CreateRequest, DropRequest, FlowRequest, FlowResponse};
use api::v1::region::InsertRequests;
use common_error::ext::BoxedError;
use common_meta::error::{ExternalSnafu, Result, UnexpectedSnafu};
use common_meta::node_manager::Flownode;
use common_telemetry::debug;
use itertools::Itertools;
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionId;
use crate::adapter::FlownodeManager;
use crate::repr::{self, DiffRow};
@@ -45,7 +49,7 @@ impl Flownode for FlownodeManager {
source_table_ids,
sink_table_name: Some(sink_table_name),
create_if_not_exists,
expire_when,
expire_after,
comment,
sql,
flow_options,
@@ -56,13 +60,14 @@ impl Flownode for FlownodeManager {
sink_table_name.schema_name,
sink_table_name.table_name,
];
let expire_after = expire_after.map(|e| e.value);
let ret = self
.create_flow(
task_id.id as u64,
sink_table_name,
&source_table_ids,
create_if_not_exists,
Some(expire_when),
expire_after,
Some(comment),
sql,
flow_options,
@@ -100,12 +105,57 @@ impl Flownode for FlownodeManager {
async fn handle_inserts(&self, request: InsertRequests) -> Result<FlowResponse> {
for write_request in request.requests {
let region_id = write_request.region_id;
let rows_proto = write_request.rows.map(|r| r.rows).unwrap_or(vec![]);
let table_id = RegionId::from(region_id).table_id();
let (insert_schema, rows_proto) = write_request
.rows
.map(|r| (r.schema, r.rows))
.unwrap_or_default();
// TODO(discord9): reconsider time assignment mechanism
let now = self.tick_manager.tick();
let fetch_order = {
let ctx = self.node_context.lock().await;
let table_col_names = ctx
.table_repr
.get_by_table_id(&table_id)
.map(|r| r.1)
.and_then(|id| ctx.schema.get(&id))
.map(|desc| &desc.names)
.context(UnexpectedSnafu {
err_msg: format!("Table not found: {}", table_id),
})?;
let name_to_col = HashMap::<_, _>::from_iter(
insert_schema
.iter()
.enumerate()
.map(|(i, name)| (&name.column_name, i)),
);
let fetch_order: Vec<usize> = table_col_names
.iter()
.map(|names| {
name_to_col.get(names).copied().context(UnexpectedSnafu {
err_msg: format!("Column not found: {}", names),
})
})
.try_collect()?;
if !fetch_order.iter().enumerate().all(|(i, &v)| i == v) {
debug!("Reordering columns: {:?}", fetch_order)
}
fetch_order
};
let rows: Vec<DiffRow> = rows_proto
.into_iter()
.map(repr::Row::from)
.map(|r| {
let r = repr::Row::from(r);
let reordered = fetch_order
.iter()
.map(|&i| r.inner[i].clone())
.collect_vec();
repr::Row::new(reordered)
})
.map(|r| (r, now, 1))
.collect_vec();
self.handle_write_request(region_id.into(), rows)

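The reordering in `handle_inserts` is a plain permutation: map each table column name to its position in the incoming insert schema, then pull row values in that order. The same logic in miniature, with `Vec`s standing in for protobuf rows and made-up column names:

```rust
use std::collections::HashMap;

/// For each column the table expects, find where it sits in the incoming
/// insert schema; then apply that permutation to every row.
fn reorder_rows(
    table_cols: &[&str],
    insert_cols: &[&str],
    rows: Vec<Vec<i64>>,
) -> Option<Vec<Vec<i64>>> {
    let name_to_idx: HashMap<&str, usize> = insert_cols
        .iter()
        .enumerate()
        .map(|(i, &n)| (n, i))
        .collect();
    let fetch_order: Vec<usize> = table_cols
        .iter()
        .map(|n| name_to_idx.get(n).copied())
        .collect::<Option<_>>()?; // None if a column is missing
    Some(
        rows.into_iter()
            .map(|row| fetch_order.iter().map(|&i| row[i]).collect())
            .collect(),
    )
}

fn main() {
    // The insert arrives as (b, a); the table stores (a, b).
    let reordered = reorder_rows(&["a", "b"], &["b", "a"], vec![vec![2, 1]]).unwrap();
    assert_eq!(reordered, vec![vec![1, 2]]);
}
```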

@@ -27,7 +27,7 @@ use crate::adapter::error::{Error, EvalSnafu, TableNotFoundSnafu};
use crate::adapter::{FlowId, TableName, TableSource};
use crate::expr::error::InternalSnafu;
use crate::expr::GlobalId;
use crate::repr::{DiffRow, RelationType, BROADCAST_CAP};
use crate::repr::{DiffRow, RelationDesc, RelationType, BROADCAST_CAP};
/// A context that holds the information of the dataflow
#[derive(Default, Debug)]
@@ -51,10 +51,8 @@ pub struct FlownodeContext {
mpsc::UnboundedReceiver<DiffRow>,
),
>,
/// store source in buffer for each source table, in case broadcast channel is full
pub send_buffer: BTreeMap<TableId, VecDeque<DiffRow>>,
/// the schema of the table, queried from metasrv or inferred from the TypedPlan
pub schema: HashMap<GlobalId, RelationType>,
pub schema: HashMap<GlobalId, RelationDesc>,
/// All the tables that have been registered in the worker
pub table_repr: IdToNameMap,
pub query_context: Option<Arc<QueryContext>>,
@@ -109,6 +107,7 @@ impl SourceSender {
}
if row_cnt > 0 {
debug!("Send {} rows", row_cnt);
debug!("Send buf len = {}", self.send_buf.len());
}
Ok(row_cnt)
@@ -140,12 +139,19 @@ impl FlownodeContext {
}
/// flush every sender's buf
///
/// return the number of rows sent
pub fn flush_all_sender(&mut self) -> Result<usize, Error> {
self.source_sender
.iter_mut()
.map(|(_table_id, src_sender)| src_sender.try_send_all())
.try_fold(0, |acc, x| x.map(|x| x + acc))
}
/// Return the total number of rows across all send bufs
pub fn get_send_buf_size(&self) -> usize {
self.source_sender.values().map(|v| v.send_buf.len()).sum()
}
}
impl FlownodeContext {
@@ -226,7 +232,7 @@ impl FlownodeContext {
/// Retrieves a GlobalId and table schema representing a table previously registered by calling the [register_table] function.
///
/// Returns an error if no table has been registered with the provided names
pub fn table(&self, name: &TableName) -> Result<(GlobalId, RelationType), Error> {
pub fn table(&self, name: &TableName) -> Result<(GlobalId, RelationDesc), Error> {
let id = self
.table_repr
.get_by_name(name)
@@ -297,7 +303,7 @@ impl FlownodeContext {
.get_by_name(table_name)
.map(|(_, gid)| gid)
.unwrap();
self.schema.insert(gid, schema);
self.schema.insert(gid, schema.into_unnamed());
Ok(())
}


@@ -17,7 +17,6 @@
use common_error::ext::BoxedError;
use common_meta::key::table_info::{TableInfoManager, TableInfoValue};
use common_meta::key::table_name::{TableNameKey, TableNameManager};
use itertools::Itertools;
use snafu::{OptionExt, ResultExt};
use table::metadata::TableId;
@@ -25,7 +24,7 @@ use crate::adapter::error::{
Error, ExternalSnafu, TableNotFoundMetaSnafu, TableNotFoundSnafu, UnexpectedSnafu,
};
use crate::adapter::TableName;
use crate::repr::{self, ColumnType, RelationType};
use crate::repr::{self, ColumnType, RelationDesc, RelationType};
/// mapping of table name <-> table id should be queried from the table info manager
pub struct TableSource {
@@ -107,7 +106,7 @@ impl TableSource {
pub async fn get_table_name_schema(
&self,
table_id: &TableId,
) -> Result<(TableName, RelationType), Error> {
) -> Result<(TableName, RelationDesc), Error> {
let table_info_value = self
.get_table_info_value(table_id)
.await?
@@ -123,14 +122,20 @@ impl TableSource {
];
let raw_schema = table_info_value.table_info.meta.schema;
let column_types = raw_schema
let (column_types, col_names): (Vec<_>, Vec<_>) = raw_schema
.column_schemas
.clone()
.into_iter()
.map(|col| ColumnType {
nullable: col.is_nullable(),
scalar_type: col.data_type,
.map(|col| {
(
ColumnType {
nullable: col.is_nullable(),
scalar_type: col.data_type,
},
col.name,
)
})
.collect_vec();
.unzip();
let key = table_info_value.table_info.meta.primary_key_indices;
let keys = vec![repr::Key::from(key)];
@@ -138,10 +143,13 @@ impl TableSource {
let time_index = raw_schema.timestamp_index;
Ok((
table_name,
RelationType {
column_types,
keys,
time_index,
RelationDesc {
typ: RelationType {
column_types,
keys,
time_index,
},
names: col_names,
},
))
}

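The `unzip` above is what turns the schema conversion into a single pass that yields both column types and column names. A two-line illustration of the idiom:

```rust
fn main() {
    let column_schemas = vec![("number", "u32"), ("ts", "datetime")];
    // One pass over the schema yields two parallel Vecs: here, column
    // names for the RelationDesc and type tags for the RelationType.
    let (names, types): (Vec<&str>, Vec<&str>) = column_schemas.into_iter().unzip();
    assert_eq!(names, ["number", "ts"]);
    assert_eq!(types, ["u32", "datetime"]);
}
```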

@@ -232,12 +232,12 @@ impl<'s> Worker<'s> {
source_ids: &[GlobalId],
src_recvs: Vec<broadcast::Receiver<DiffRow>>,
// TODO(discord9): set an expire duration for all arrangements and compare to the system timestamp instead
expire_when: Option<repr::Duration>,
create_if_not_exist: bool,
expire_after: Option<repr::Duration>,
create_if_not_exists: bool,
err_collector: ErrCollector,
) -> Result<Option<FlowId>, Error> {
let already_exist = self.task_states.contains_key(&flow_id);
match (already_exist, create_if_not_exist) {
let already_exists = self.task_states.contains_key(&flow_id);
match (already_exists, create_if_not_exists) {
(true, true) => return Ok(None),
(true, false) => FlowAlreadyExistSnafu { id: flow_id }.fail()?,
(false, _) => (),
@@ -247,7 +247,7 @@ impl<'s> Worker<'s> {
err_collector,
..Default::default()
};
cur_task_state.state.set_expire_after(expire_when);
cur_task_state.state.set_expire_after(expire_after);
{
let mut ctx = cur_task_state.new_ctx(sink_id);
@@ -319,8 +319,8 @@ impl<'s> Worker<'s> {
sink_sender,
source_ids,
src_recvs,
expire_when,
create_if_not_exist,
expire_after,
create_if_not_exists,
err_collector,
} => {
let task_create_result = self.create_flow(
@@ -330,8 +330,8 @@ impl<'s> Worker<'s> {
sink_sender,
&source_ids,
src_recvs,
expire_when,
create_if_not_exist,
expire_after,
create_if_not_exists,
err_collector,
);
Some((
@@ -368,8 +368,8 @@ pub enum Request {
sink_sender: mpsc::UnboundedSender<DiffRow>,
source_ids: Vec<GlobalId>,
src_recvs: Vec<broadcast::Receiver<DiffRow>>,
expire_when: Option<repr::Duration>,
create_if_not_exist: bool,
expire_after: Option<repr::Duration>,
create_if_not_exists: bool,
err_collector: ErrCollector,
},
Remove {
@@ -524,8 +524,8 @@ mod test {
sink_sender: sink_tx,
source_ids: src_ids,
src_recvs: vec![rx],
expire_when: None,
create_if_not_exist: true,
expire_after: None,
create_if_not_exists: true,
err_collector: ErrCollector::default(),
};
handle.create_flow(create_reqs).await.unwrap();


@@ -124,9 +124,13 @@ fn mfp_subgraph(
// 1. Read all updates that were emitted between the last time this arrangement had updates and the current time.
// 2. Output the updates.
// 3. Truncate all updates within that range.
let from = arrange.read().last_compaction_time().map(|n| n + 1);
let from = arrange.read().last_compaction_time();
let from = from.unwrap_or(repr::Timestamp::MIN);
let output_kv = arrange.read().get_updates_in_range(from..=now);
let range = (
std::ops::Bound::Excluded(from),
std::ops::Bound::Included(now),
);
let output_kv = arrange.read().get_updates_in_range(range);
// the output is expected to be key -> empty val
let output = output_kv
.into_iter()

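Switching from `from + 1 ..= now` to an explicit `(Excluded(from), Included(now))` pair expresses "strictly after the last compaction, up to and including now" without computing `from + 1`, which would overflow at the maximum timestamp and needs a separate case when no compaction has happened yet. A small demonstration with a `BTreeMap` standing in for the arrangement:

```rust
use std::collections::BTreeMap;
use std::ops::Bound;

fn main() {
    let mut arrangement: BTreeMap<i64, &str> = BTreeMap::new();
    arrangement.insert(5, "compacted");
    arrangement.insert(6, "pending");
    arrangement.insert(7, "pending");

    let last_compaction_time = 5i64;
    let now = 7i64;

    // "Strictly after the last compaction, up to and including now",
    // with no `from + 1` arithmetic anywhere.
    let range = (
        Bound::Excluded(last_compaction_time),
        Bound::Included(now),
    );
    let pending: Vec<i64> = arrangement.range(range).map(|(t, _)| *t).collect();
    assert_eq!(pending, [6, 7]);
}
```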

@@ -26,7 +26,7 @@ use crate::adapter::error::{Error, PlanSnafu};
use crate::compute::render::{Context, SubgraphArg};
use crate::compute::state::Scheduler;
use crate::compute::types::{Arranged, Collection, CollectionBundle, ErrCollector, Toff};
use crate::expr::error::{DataTypeSnafu, InternalSnafu};
use crate::expr::error::{DataAlreadyExpiredSnafu, DataTypeSnafu, InternalSnafu};
use crate::expr::{AggregateExpr, EvalError, ScalarExpr};
use crate::plan::{AccumulablePlan, AggrWithIndex, KeyValPlan, Plan, ReducePlan, TypedPlan};
use crate::repr::{self, DiffRow, KeyValDiffRow, RelationType, Row};
@@ -301,9 +301,13 @@ fn update_reduce_distinct_arrange(
// Deal with output:
// 1. Read all updates that were emitted between the last time this arrangement had updates and the current time.
let from = arrange.read().last_compaction_time().map(|n| n + 1);
let from = arrange.read().last_compaction_time();
let from = from.unwrap_or(repr::Timestamp::MIN);
let output_kv = arrange.read().get_updates_in_range(from..=now);
let range = (
std::ops::Bound::Excluded(from),
std::ops::Bound::Included(now),
);
let output_kv = arrange.read().get_updates_in_range(range);
// 2. Truncate all updates stored in arrangement within that range.
let run_compaction = || {
@@ -397,6 +401,24 @@ fn reduce_accum_subgraph(
// TODO(discord9): consider key-based lock
let mut arrange = arrange.write();
for (key, value_diffs) in key_to_vals {
if let Some(expire_man) = &arrange.get_expire_state() {
let mut is_expired = false;
err_collector.run(|| {
if let Some(expired) = expire_man.get_expire_duration(now, &key)? {
is_expired = true;
DataAlreadyExpiredSnafu {
expired_by: expired,
}
.fail()
} else {
Ok(())
}
});
if is_expired {
// errors already collected, we can just continue to next key
continue;
}
}
let col_diffs = {
let row_len = value_diffs[0].0.len();
let res = err_collector.run(|| get_col_diffs(value_diffs, row_len));


@@ -55,9 +55,12 @@ impl<'referred, 'df> Context<'referred, 'df> {
.df
.add_subgraph_source("source", send_port, move |_ctx, send| {
let now = *now.borrow();
let arr = arrange_handler_inner.write().get_updates_in_range(..=now);
err_collector.run(|| arrange_handler_inner.write().compact_to(now));
// write lock to prevent unexpected mutation
let mut arranged = arrange_handler_inner.write();
let arr = arranged.get_updates_in_range(..=now);
err_collector.run(|| arranged.compact_to(now));
debug!("Call source");
let prev_avail = arr.into_iter().map(|((k, _), t, d)| (k, t, d));
let mut to_send = Vec::new();
let mut to_arrange = Vec::new();
@@ -77,10 +80,10 @@ impl<'referred, 'df> Context<'referred, 'df> {
to_arrange.len()
);
}
err_collector.run(|| arrange_handler_inner.write().apply_updates(now, to_arrange));
err_collector.run(|| arranged.apply_updates(now, to_arrange));
send.give(all);
// always schedule source to run at next tick
inner_schd.schedule_at(now + 1);
// always schedule the source to run at `now` so we can repeatedly run the source if needed
inner_schd.schedule_at(now);
});
schd.set_cur_subgraph(sub);
let arranged = Arranged::new(arrange_handler);

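The source-operator fix above is a lock-discipline change: take the write guard once and hold it across the read, the compaction, and the update, rather than re-locking between steps where another writer could interleave. The shape of it, with an `RwLock<Vec<_>>` as a stand-in arrangement:

```rust
use std::sync::RwLock;

fn main() {
    let arrangement = RwLock::new(vec![1u64, 2, 3]);

    // One write guard for the whole read-modify-write sequence; nothing
    // can mutate the arrangement between the snapshot and the update.
    {
        let mut guard = arrangement.write().unwrap();
        let snapshot: u64 = guard.iter().sum();
        guard.push(snapshot);
    }

    assert_eq!(*arrangement.read().unwrap(), vec![1, 2, 3, 6]);
}
```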

@@ -100,4 +100,11 @@ pub enum EvalError {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Incoming data already expired by {} ms", expired_by))]
DataAlreadyExpired {
expired_by: i64,
#[snafu(implicit)]
location: Location,
},
}


@@ -278,9 +278,9 @@ impl UnaryFunc {
start_time,
} => {
let ts = get_ts_as_millisecond(arg)?;
let start_time = start_time.map(|t| t.val()).unwrap_or(0);
let start_time = start_time.map(|t| t.val());
let window_size = (window_size.to_nanosecond() / 1_000_000) as repr::Duration; // nanosecond to millisecond
let window_start = start_time + (ts - start_time) / window_size * window_size;
let window_start = get_window_start(ts, window_size, start_time);
let ret = Timestamp::new_millisecond(window_start);
Ok(Value::from(ret))
@@ -290,9 +290,9 @@ impl UnaryFunc {
start_time,
} => {
let ts = get_ts_as_millisecond(arg)?;
let start_time = start_time.map(|t| t.val()).unwrap_or(0);
let start_time = start_time.map(|t| t.val());
let window_size = (window_size.to_nanosecond() / 1_000_000) as repr::Duration; // nanosecond to millisecond
let window_start = start_time + (ts - start_time) / window_size * window_size;
let window_start = get_window_start(ts, window_size, start_time);
let window_end = window_start + window_size;
let ret = Timestamp::new_millisecond(window_end);
@@ -302,6 +302,35 @@ impl UnaryFunc {
}
}
fn get_window_start(
ts: repr::Timestamp,
window_size: repr::Duration,
start_time: Option<repr::Timestamp>,
) -> repr::Timestamp {
let start_time = start_time.unwrap_or(0);
// left-closed, right-open window
if ts >= start_time {
start_time + (ts - start_time) / window_size * window_size
} else {
start_time + (ts - start_time) / window_size * window_size
- if ((start_time - ts) % window_size) != 0 {
window_size
} else {
0
}
}
}
#[test]
fn test_get_window_start() {
assert_eq!(get_window_start(1, 3, None), 0);
assert_eq!(get_window_start(3, 3, None), 3);
assert_eq!(get_window_start(0, 3, None), 0);
assert_eq!(get_window_start(-1, 3, None), -3);
assert_eq!(get_window_start(-3, 3, None), -3);
}
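Incidentally, the two branches of `get_window_start` are exactly floor division toward negative infinity, so the same windows fall out of `div_euclid`; a quick equivalence check (not part of the patch):

```rust
fn get_window_start_floor(ts: i64, window_size: i64, start_time: Option<i64>) -> i64 {
    let start_time = start_time.unwrap_or(0);
    // div_euclid rounds toward negative infinity, which is precisely the
    // branchy adjustment the hand-written version performs for ts < start.
    start_time + (ts - start_time).div_euclid(window_size) * window_size
}

fn main() {
    for (ts, expected) in [(1, 0), (3, 3), (0, 0), (-1, -3), (-3, -3)] {
        assert_eq!(get_window_start_floor(ts, 3, None), expected);
    }
}
```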
fn get_ts_as_millisecond(arg: Value) -> Result<repr::Timestamp, EvalError> {
let ts = if let Some(ts) = arg.as_timestamp() {
ts.convert_to(TimeUnit::Millisecond)


@@ -45,6 +45,8 @@ impl TypedExpr {
impl TypedExpr {
/// expand multi-value expression to multiple expressions with new indices
///
/// Currently it just means expanding `TumbleWindow` into `TumbleWindowFloor` and `TumbleWindowCeiling`
pub fn expand_multi_value(
input_typ: &RelationType,
exprs: &[TypedExpr],
@@ -106,7 +108,7 @@ impl TypedExpr {
})
.collect::<Result<Vec<_>, _>>()?;
Ok(dbg!(exprs))
Ok(exprs)
}
}


@@ -262,6 +262,19 @@ impl RelationType {
true
}
/// Return relation describe with column names
pub fn into_named(self, names: Vec<ColumnName>) -> RelationDesc {
RelationDesc { typ: self, names }
}
/// Return relation describe without column names
pub fn into_unnamed(self) -> RelationDesc {
RelationDesc {
typ: self,
names: vec![],
}
}
}
/// The type of a `Value`
@@ -325,8 +338,8 @@ fn return_true() -> bool {
/// Individual column names are optional.
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]
pub struct RelationDesc {
typ: RelationType,
names: Vec<ColumnName>,
pub typ: RelationType,
pub names: Vec<ColumnName>,
}
impl RelationDesc {


@@ -211,7 +211,7 @@ mod test {
let schema = RelationType::new(vec![ColumnType::new(CDT::uint32_datatype(), false)]);
tri_map.insert(Some(name.clone()), Some(1024), gid);
schemas.insert(gid, schema);
schemas.insert(gid, schema.into_unnamed());
}
{
@@ -225,7 +225,7 @@ mod test {
ColumnType::new(CDT::uint32_datatype(), false),
ColumnType::new(CDT::datetime_datatype(), false),
]);
schemas.insert(gid, schema);
schemas.insert(gid, schema.into_unnamed());
tri_map.insert(Some(name.clone()), Some(1025), gid);
}


@@ -435,6 +435,236 @@ mod test {
use crate::repr::{self, ColumnType, RelationType};
use crate::transform::test::{create_test_ctx, create_test_query_engine, sql_to_substrait};
/// TODO(discord9): add more illegal sql tests
#[tokio::test]
async fn test_tumble_composite() {
let engine = create_test_query_engine();
let sql =
"SELECT number, avg(number) FROM numbers_with_ts GROUP BY tumble(ts, '1 hour'), number";
let plan = sql_to_substrait(engine.clone(), sql).await;
let mut ctx = create_test_ctx();
let flow_plan = TypedPlan::from_substrait_plan(&mut ctx, &plan).unwrap();
let aggr_exprs = vec![
AggregateExpr {
func: AggregateFunc::SumUInt32,
expr: ScalarExpr::Column(0),
distinct: false,
},
AggregateExpr {
func: AggregateFunc::Count,
expr: ScalarExpr::Column(0),
distinct: false,
},
];
let avg_expr = ScalarExpr::If {
cond: Box::new(ScalarExpr::Column(4).call_binary(
ScalarExpr::Literal(Value::from(0i64), CDT::int64_datatype()),
BinaryFunc::NotEq,
)),
then: Box::new(ScalarExpr::Column(3).call_binary(
ScalarExpr::Column(4).call_unary(UnaryFunc::Cast(CDT::uint64_datatype())),
BinaryFunc::DivUInt64,
)),
els: Box::new(ScalarExpr::Literal(Value::Null, CDT::uint64_datatype())),
};
let expected = TypedPlan {
// TODO(discord9): mfp indirectly ref to key columns
/*
.with_key(vec![1])
.with_time_index(Some(0)),*/
plan: Plan::Mfp {
input: Box::new(
Plan::Reduce {
input: Box::new(
Plan::Get {
id: crate::expr::Id::Global(GlobalId::User(1)),
}
.with_types(RelationType::new(vec![
ColumnType::new(ConcreteDataType::uint32_datatype(), false),
ColumnType::new(ConcreteDataType::datetime_datatype(), false),
])),
),
key_val_plan: KeyValPlan {
key_plan: MapFilterProject::new(2)
.map(vec![
ScalarExpr::Column(1).call_unary(
UnaryFunc::TumbleWindowFloor {
window_size: Interval::from_month_day_nano(
0,
0,
3_600_000_000_000,
),
start_time: None,
},
),
ScalarExpr::Column(1).call_unary(
UnaryFunc::TumbleWindowCeiling {
window_size: Interval::from_month_day_nano(
0,
0,
3_600_000_000_000,
),
start_time: None,
},
),
ScalarExpr::Column(0),
])
.unwrap()
.project(vec![2, 3, 4])
.unwrap()
.into_safe(),
val_plan: MapFilterProject::new(2)
.project(vec![0, 1])
.unwrap()
.into_safe(),
},
reduce_plan: ReducePlan::Accumulable(AccumulablePlan {
full_aggrs: aggr_exprs.clone(),
simple_aggrs: vec![
AggrWithIndex::new(aggr_exprs[0].clone(), 0, 0),
AggrWithIndex::new(aggr_exprs[1].clone(), 0, 1),
],
distinct_aggrs: vec![],
}),
}
.with_types(
RelationType::new(vec![
// keys
ColumnType::new(CDT::datetime_datatype(), false), // window start(time index)
ColumnType::new(CDT::datetime_datatype(), false), // window end(pk)
ColumnType::new(CDT::uint32_datatype(), false), // number(pk)
// values
ColumnType::new(CDT::uint64_datatype(), true), // avg.sum(number)
ColumnType::new(CDT::int64_datatype(), true), // avg.count(number)
])
.with_key(vec![1, 2])
.with_time_index(Some(0)),
),
),
mfp: MapFilterProject::new(5)
.map(vec![
avg_expr,
ScalarExpr::Column(2), // number(pk)
ScalarExpr::Column(5), // avg.sum(number)
ScalarExpr::Column(0), // window start
ScalarExpr::Column(1), // window end
])
.unwrap()
.project(vec![6, 7, 8, 9])
.unwrap(),
},
typ: RelationType::new(vec![
ColumnType::new(CDT::uint32_datatype(), false), // number
ColumnType::new(CDT::uint64_datatype(), true), // avg(number)
ColumnType::new(CDT::datetime_datatype(), false), // window start
ColumnType::new(CDT::datetime_datatype(), false), // window end
]),
};
assert_eq!(flow_plan, expected);
}
#[tokio::test]
async fn test_tumble_parse_optional() {
let engine = create_test_query_engine();
let sql = "SELECT sum(number) FROM numbers_with_ts GROUP BY tumble(ts, '1 hour')";
let plan = sql_to_substrait(engine.clone(), sql).await;
let mut ctx = create_test_ctx();
let flow_plan = TypedPlan::from_substrait_plan(&mut ctx, &plan).unwrap();
let aggr_expr = AggregateExpr {
func: AggregateFunc::SumUInt32,
expr: ScalarExpr::Column(0),
distinct: false,
};
let expected = TypedPlan {
typ: RelationType::new(vec![
ColumnType::new(CDT::uint64_datatype(), true), // sum(number)
ColumnType::new(CDT::datetime_datatype(), false), // window start
ColumnType::new(CDT::datetime_datatype(), false), // window end
]),
// TODO(discord9): mfp indirectly ref to key columns
/*
.with_key(vec![1])
.with_time_index(Some(0)),*/
plan: Plan::Mfp {
input: Box::new(
Plan::Reduce {
input: Box::new(
Plan::Get {
id: crate::expr::Id::Global(GlobalId::User(1)),
}
.with_types(RelationType::new(vec![
ColumnType::new(ConcreteDataType::uint32_datatype(), false),
ColumnType::new(ConcreteDataType::datetime_datatype(), false),
])),
),
key_val_plan: KeyValPlan {
key_plan: MapFilterProject::new(2)
.map(vec![
ScalarExpr::Column(1).call_unary(
UnaryFunc::TumbleWindowFloor {
window_size: Interval::from_month_day_nano(
0,
0,
3_600_000_000_000,
),
start_time: None,
},
),
ScalarExpr::Column(1).call_unary(
UnaryFunc::TumbleWindowCeiling {
window_size: Interval::from_month_day_nano(
0,
0,
3_600_000_000_000,
),
start_time: None,
},
),
])
.unwrap()
.project(vec![2, 3])
.unwrap()
.into_safe(),
val_plan: MapFilterProject::new(2)
.project(vec![0, 1])
.unwrap()
.into_safe(),
},
reduce_plan: ReducePlan::Accumulable(AccumulablePlan {
full_aggrs: vec![aggr_expr.clone()],
simple_aggrs: vec![AggrWithIndex::new(aggr_expr.clone(), 0, 0)],
distinct_aggrs: vec![],
}),
}
.with_types(
RelationType::new(vec![
ColumnType::new(CDT::datetime_datatype(), false), // window start
ColumnType::new(CDT::datetime_datatype(), false), // window end
ColumnType::new(CDT::uint64_datatype(), true), // sum(number)
])
.with_key(vec![1])
.with_time_index(Some(0)),
),
),
mfp: MapFilterProject::new(3)
.map(vec![
ScalarExpr::Column(2),
ScalarExpr::Column(3),
ScalarExpr::Column(0),
ScalarExpr::Column(1),
])
.unwrap()
.project(vec![4, 5, 6])
.unwrap(),
},
};
assert_eq!(flow_plan, expected);
}
#[tokio::test]
async fn test_tumble_parse() {
let engine = create_test_query_engine();


@@ -101,8 +101,7 @@ impl TypedExpr {
.unzip();
match arg_len {
// because a variadic function can also have 1 argument, we need to check if it's a variadic function first
1 if VariadicFunc::from_str_and_types(fn_name, &arg_types).is_err() => {
1 if UnaryFunc::from_str_and_type(fn_name, None).is_ok() => {
let func = UnaryFunc::from_str_and_type(fn_name, None)?;
let arg = arg_exprs[0].clone();
let ret_type = ColumnType::new_nullable(func.signature().output.clone());
@@ -124,8 +123,13 @@ impl TypedExpr {
Ok(TypedExpr::new(arg.call_unary(func), ret_type))
}
// because a variadic function can also have 2 arguments, we need to check if it's a variadic function first
2 if VariadicFunc::from_str_and_types(fn_name, &arg_types).is_err() => {
2 if BinaryFunc::from_str_expr_and_type(
fn_name,
&arg_exprs,
arg_types.get(0..2).expect("arg have 2 elements"),
)
.is_ok() =>
{
let (func, signature) =
BinaryFunc::from_str_expr_and_type(fn_name, &arg_exprs, &arg_types[0..2])?;


@@ -269,7 +269,7 @@ impl TypedPlan {
id: crate::expr::Id::Global(table.0),
};
let get_table = TypedPlan {
typ: table.1,
typ: table.1.typ().clone(),
plan: get_table,
};


@@ -18,6 +18,7 @@ use std::collections::{BTreeMap, BTreeSet};
use std::ops::Bound;
use std::sync::Arc;
use common_telemetry::debug;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use smallvec::{smallvec, SmallVec};
@@ -86,7 +87,7 @@ impl KeyExpiryManager {
///
/// - If the given key is expired by now (i.e. its event timestamp is less than `now - expiry_duration`), return the amount of time by which it's expired.
/// - If it's not expired, return `None`
pub fn update_event_ts(
pub fn get_expire_duration_and_update_event_ts(
&mut self,
now: Timestamp,
row: &Row,
@@ -95,6 +96,33 @@ impl KeyExpiryManager {
return Ok(None);
};
self.event_ts_to_key
.entry(event_ts)
.or_default()
.insert(row.clone());
if let Some(expire_time) = self.compute_expiration_timestamp(now) {
if expire_time > event_ts {
// return how much time it's expired
return Ok(Some(expire_time - event_ts));
}
}
Ok(None)
}
/// Get the expire duration of a key, if it's expired by now.
///
/// Return `None` if the key is not expired
pub fn get_expire_duration(
&self,
now: Timestamp,
row: &Row,
) -> Result<Option<Duration>, EvalError> {
let Some(event_ts) = self.extract_event_ts(row)? else {
return Ok(None);
};
if let Some(expire_time) = self.compute_expiration_timestamp(now) {
if expire_time > event_ts {
// return how much time it's expired
@@ -102,10 +130,6 @@ impl KeyExpiryManager {
}
}
self.event_ts_to_key
.entry(event_ts)
.or_default()
.insert(row.clone());
Ok(None)
}
@@ -189,6 +213,10 @@ impl Arrangement {
}
}
pub fn get_expire_state(&self) -> Option<&KeyExpiryManager> {
self.expire_state.as_ref()
}
pub fn set_expire_state(&mut self, expire_state: KeyExpiryManager) {
self.expire_state = Some(expire_state);
}
@@ -208,8 +236,12 @@ impl Arrangement {
for ((key, val), update_ts, diff) in updates {
// check if the key is expired
if let Some(s) = &mut self.expire_state {
if let Some(expired_by) = s.update_event_ts(now, &key)? {
if let Some(expired_by) = s.get_expire_duration_and_update_event_ts(now, &key)? {
max_expired_by = max_expired_by.max(Some(expired_by));
debug!(
"Expired key: {:?}, expired by: {:?} with time being now={}",
key, expired_by, now
);
continue;
}
}
@@ -335,7 +367,9 @@ impl Arrangement {
for (key, updates) in batch {
// check if the key is expired
if let Some(s) = &mut self.expire_state {
if let Some(expired_by) = s.update_event_ts(now, &key)? {
if let Some(expired_by) =
s.get_expire_duration_and_update_event_ts(now, &key)?
{
max_expired_by = max_expired_by.max(Some(expired_by));
continue;
}
@@ -540,6 +574,10 @@ impl ArrangeHandler {
pub fn set_full_arrangement(&self, full: bool) {
self.write().full_arrangement = full;
}
pub fn is_full_arrangement(&self) -> bool {
self.read().full_arrangement
}
}
#[cfg(test)]

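The split above gives the arrangement a read-only expiry probe (`get_expire_duration`) alongside the mutating one. The arithmetic itself is small; a hypothetical reduced form:

```rust
type Timestamp = i64;
type Duration = i64;

/// A key whose event timestamp falls before `now - expire_after` is
/// expired; report by how much. (Hypothetical reduction of the real check.)
fn expire_duration(now: Timestamp, event_ts: Timestamp, expire_after: Duration) -> Option<Duration> {
    let expire_time = now - expire_after;
    (expire_time > event_ts).then(|| expire_time - event_ts)
}

fn main() {
    // now = 10_000 ms, keep the last 1_000 ms of data.
    assert_eq!(expire_duration(10_000, 8_500, 1_000), Some(500));
    assert_eq!(expire_duration(10_000, 9_500, 1_000), None);
}
```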

@@ -355,6 +355,14 @@ pub enum Error {
location: Location,
name: String,
},
#[snafu(display("Invalid tls config"))]
InvalidTlsConfig {
#[snafu(source)]
error: common_grpc::error::Error,
#[snafu(implicit)]
location: Location,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -374,7 +382,8 @@ impl ErrorExt for Error {
| Error::IllegalAuthConfig { .. }
| Error::EmptyData { .. }
| Error::ColumnNoneDefaultValue { .. }
| Error::IncompleteGrpcRequest { .. } => StatusCode::InvalidArguments,
| Error::IncompleteGrpcRequest { .. }
| Error::InvalidTlsConfig { .. } => StatusCode::InvalidArguments,
Error::NotSupported { .. } => StatusCode::Unsupported,


@@ -151,7 +151,7 @@ impl HeartbeatTask {
Some(req)
}
Err(e) => {
error!(e;"Failed to encode mailbox messages!");
error!(e; "Failed to encode mailbox messages");
None
}
}


@@ -76,9 +76,12 @@ where
let grpc_config = GrpcServerConfig {
max_recv_message_size: opts.max_recv_message_size.as_bytes() as usize,
max_send_message_size: opts.max_send_message_size.as_bytes() as usize,
tls: opts.tls.clone(),
};
Ok(GrpcServerBuilder::new(grpc_config, grpc_runtime))
let builder = GrpcServerBuilder::new(grpc_config, grpc_runtime)
.with_tls_config(opts.tls.clone())
.context(error::InvalidTlsConfigSnafu)?;
Ok(builder)
}
pub fn http_server_builder(&self, opts: &FrontendOptions) -> HttpServerBuilder {


@@ -17,6 +17,7 @@ use common_grpc::channel_manager::{
DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE, DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
};
use serde::{Deserialize, Serialize};
use servers::tls::TlsOption;
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct GrpcOptions {
@@ -26,6 +27,8 @@ pub struct GrpcOptions {
pub max_recv_message_size: ReadableSize,
// Max gRPC sending(encoding) message size
pub max_send_message_size: ReadableSize,
#[serde(default = "Default::default")]
pub tls: TlsOption,
}
impl Default for GrpcOptions {
@@ -35,6 +38,7 @@ impl Default for GrpcOptions {
runtime_size: 8,
max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
tls: TlsOption::default(),
}
}
}

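Across these three files, TLS setup moves out of `GrpcServerConfig` and into the builder, where it can fail fast at startup. A hypothetical miniature of that shape — the names and validation below are illustrative, not the real `servers` API:

```rust
/// Hypothetical miniature of the change: TLS setup moves into the builder
/// and can fail, so construction returns a Result instead of a bare builder.
#[derive(Clone, Debug, Default)]
struct TlsOption {
    cert_path: Option<String>,
    key_path: Option<String>,
}

struct GrpcServerBuilder {
    tls: Option<TlsOption>,
}

impl GrpcServerBuilder {
    fn new() -> Self {
        Self { tls: None }
    }

    /// Validate eagerly so a bad config fails at startup, not at accept time.
    fn with_tls_config(mut self, tls: TlsOption) -> Result<Self, String> {
        if tls.cert_path.is_some() != tls.key_path.is_some() {
            return Err("cert and key must be provided together".to_string());
        }
        self.tls = Some(tls);
        Ok(self)
    }
}

fn main() {
    assert!(GrpcServerBuilder::new()
        .with_tls_config(TlsOption::default())
        .is_ok());
    let bad = TlsOption {
        cert_path: Some("server.crt".into()),
        key_path: None,
    };
    assert!(GrpcServerBuilder::new().with_tls_config(bad).is_err());
}
```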

@@ -1,59 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::time::Duration;
#[derive(Debug, Clone)]
pub struct LogConfig {
pub file_size: u64,
pub log_file_dir: String,
pub purge_interval: Duration,
pub purge_threshold: u64,
pub read_batch_size: usize,
pub sync_write: bool,
}
impl Default for LogConfig {
/// Default value of config stores log file into a tmp directory, which should only be used
/// in tests.
fn default() -> Self {
Self {
file_size: 1024 * 1024 * 1024,
log_file_dir: "/tmp/greptimedb".to_string(),
purge_interval: Duration::from_secs(10 * 60),
purge_threshold: 1024 * 1024 * 1024 * 50,
read_batch_size: 128,
sync_write: false,
}
}
}
#[cfg(test)]
mod tests {
use common_telemetry::info;
use super::*;
#[test]
pub fn test_default_config() {
common_telemetry::common_telemetry::init_default_ut_logging();
let default = LogConfig::default();
info!("LogConfig::default(): {:?}", default);
assert_eq!(1024 * 1024 * 1024, default.file_size);
assert_eq!(Duration::from_secs(600), default.purge_interval);
assert_eq!(1024 * 1024 * 1024 * 50, default.purge_threshold);
assert_eq!(128, default.read_batch_size);
assert!(!default.sync_write);
}
}


@@ -20,10 +20,9 @@ pub(crate) mod util;
use std::fmt::Display;
use serde::{Deserialize, Serialize};
use store_api::logstore::entry::{Entry, Id as EntryId};
use store_api::logstore::entry::{Entry, Id as EntryId, RawEntry};
use store_api::logstore::namespace::Namespace;
use crate::error::Error;
use store_api::storage::RegionId;
/// Kafka Namespace implementation.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
@@ -56,7 +55,13 @@ pub struct EntryImpl {
}
impl Entry for EntryImpl {
type Error = Error;
fn into_raw_entry(self) -> RawEntry {
RawEntry {
region_id: self.region_id(),
entry_id: self.id(),
data: self.data,
}
}
fn data(&self) -> &[u8] {
&self.data
@@ -66,6 +71,10 @@ impl Entry for EntryImpl {
self.id
}
fn region_id(&self) -> RegionId {
RegionId::from_u64(self.ns.region_id)
}
fn estimated_size(&self) -> usize {
size_of::<Self>() + self.data.capacity() * size_of::<u8>() + self.ns.topic.capacity()
}

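The new `into_raw_entry` gives every log-store entry flavor a common lowering target. A minimal mirror of the pattern with simplified types:

```rust
/// Hypothetical minimal mirror of the `into_raw_entry` addition: every
/// log-store entry flavor lowers to one common raw representation.
#[derive(Debug, PartialEq)]
struct RawEntry {
    region_id: u64,
    entry_id: u64,
    data: Vec<u8>,
}

trait Entry {
    fn into_raw_entry(self) -> RawEntry;
}

struct KafkaEntry {
    id: u64,
    region_id: u64,
    data: Vec<u8>,
}

impl Entry for KafkaEntry {
    fn into_raw_entry(self) -> RawEntry {
        RawEntry {
            region_id: self.region_id,
            entry_id: self.id,
            data: self.data,
        }
    }
}

fn main() {
    let raw = KafkaEntry {
        id: 7,
        region_id: 42,
        data: b"payload".to_vec(),
    }
    .into_raw_entry();
    assert_eq!(
        raw,
        RawEntry {
            region_id: 42,
            entry_id: 7,
            data: b"payload".to_vec()
        }
    );
}
```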

@@ -13,9 +13,10 @@
// limitations under the License.
use common_wal::options::WalOptions;
use store_api::logstore::entry::{Entry, Id as EntryId};
use store_api::logstore::entry::{Entry, Id as EntryId, RawEntry};
use store_api::logstore::namespace::{Id as NamespaceId, Namespace};
use store_api::logstore::{AppendBatchResponse, AppendResponse, LogStore};
use store_api::storage::RegionId;
use crate::error::{Error, Result};
@@ -36,7 +37,13 @@ impl Namespace for NamespaceImpl {
}
impl Entry for EntryImpl {
type Error = Error;
fn into_raw_entry(self) -> RawEntry {
RawEntry {
region_id: self.region_id(),
entry_id: self.id(),
data: vec![],
}
}
fn data(&self) -> &[u8] {
&[]
@@ -46,6 +53,10 @@ impl Entry for EntryImpl {
0
}
fn region_id(&self) -> RegionId {
RegionId::from_u64(0)
}
fn estimated_size(&self) -> usize {
0
}


@@ -15,10 +15,10 @@
use std::hash::{Hash, Hasher};
use std::mem::size_of;
use store_api::logstore::entry::{Entry, Id as EntryId};
use store_api::logstore::entry::{Entry, Id as EntryId, RawEntry};
use store_api::logstore::namespace::{Id as NamespaceId, Namespace};
use store_api::storage::RegionId;
use crate::error::Error;
use crate::raft_engine::protos::logstore::{EntryImpl, NamespaceImpl};
mod backend;
@@ -67,7 +67,13 @@ impl Namespace for NamespaceImpl {
}
impl Entry for EntryImpl {
type Error = Error;
fn into_raw_entry(self) -> RawEntry {
RawEntry {
region_id: self.region_id(),
entry_id: self.id(),
data: self.data,
}
}
fn data(&self) -> &[u8] {
self.data.as_slice()
@@ -77,6 +83,10 @@ impl Entry for EntryImpl {
self.id
}
fn region_id(&self) -> RegionId {
RegionId::from_u64(self.id)
}
fn estimated_size(&self) -> usize {
self.data.len() + size_of::<u64>() + size_of::<u64>()
}


@@ -46,6 +46,7 @@ use crate::metasrv::builder::MetasrvBuilder;
use crate::metasrv::{Metasrv, MetasrvOptions, SelectorRef};
use crate::selector::lease_based::LeaseBasedSelector;
use crate::selector::load_based::LoadBasedSelector;
use crate::selector::round_robin::RoundRobinSelector;
use crate::selector::SelectorType;
use crate::service::admin;
use crate::{error, Result};
@@ -228,6 +229,7 @@ pub async fn metasrv_builder(
let selector = match opts.selector {
SelectorType::LoadBased => Arc::new(LoadBasedSelector::default()) as SelectorRef,
SelectorType::LeaseBased => Arc::new(LeaseBasedSelector) as SelectorRef,
SelectorType::RoundRobin => Arc::new(RoundRobinSelector::default()) as SelectorRef,
};
Ok(MetasrvBuilder::new()
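The round-robin selector registered above reduces to an atomic counter modulo the peer count. A minimal sketch of that shape (the real selector works on datanode peers and lives in `meta-srv`):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

/// Minimal shape of a round-robin peer selector: each call hands out the
/// next peer, wrapping around, independent of load or lease state.
struct RoundRobinSelector {
    counter: AtomicUsize,
}

impl RoundRobinSelector {
    fn new() -> Self {
        Self {
            counter: AtomicUsize::new(0),
        }
    }

    fn select<'a, T>(&self, peers: &'a [T]) -> Option<&'a T> {
        if peers.is_empty() {
            return None;
        }
        let i = self.counter.fetch_add(1, Ordering::Relaxed) % peers.len();
        peers.get(i)
    }
}

fn main() {
    let selector = RoundRobinSelector::new();
    let peers = ["dn-0", "dn-1", "dn-2"];
    let picked: Vec<_> = (0..4).map(|_| *selector.select(&peers).unwrap()).collect();
    assert_eq!(picked, ["dn-0", "dn-1", "dn-2", "dn-0"]);
}
```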

Some files were not shown because too many files have changed in this diff.