Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-23 06:30:05 +00:00

Compare commits: v0.4.1 ... script_wra (193 commits)
Commits in this comparison (SHA1):

24f5e56196, c85d569797, e95a8e070c, b71bf11772, ee0a3972fc, 8fb40c66a4, e855f6370e, fb5dcbc40c, 0d109436b8, cbae03af07,
902e6ead60, f9e7762c5b, 0b421b5177, aa89d9deef, b3ffe5cd1e, d6ef7a75de, 6344b1e0db, 7d506b3c5f, 96e12e9ee5, a9db80ab1a,
5f5dbe0172, dac7a41cbd, de416465a6, 58c13739f0, 806400caff, f78dab078c, 7a14db68a6, c26f2f94c0, 781f2422b3, 7e68ecc498,
9ce9421850, c0df2b9086, 29d344ccd2, fe2fc723bc, 2332305b90, 9ccd182109, ae8153515b, cce5edc88e, 616eb04914, 7c53f92e4b,
445bd92c7a, 92a9802343, abbac46c05, d0d0f091f0, 707a0d5626, e42767d500, ca18ccf7d4, b1d8812806, 7547e7ebdf, 6100cb335a,
0badb3715e, bd9c2f2666, b3edbef1f3, 9e58bba363, 3a4c9f2b45, 64a36e9b36, 33566ea0f0, ff8ab6763b, 00e4bd45f0, 85eebcb16f,
102e43aace, 56fc77e573, 4c76d4d97e, 9e5cdf47d9, bdb677dc52, 99dbb7401c, a7bbd61f28, efc5abfc02, 43a7457e15, 20f01219e9,
dc351a6de9, 5f87b1f714, b9146c88ff, 9558b3c201, da68d8ce4b, 01867adaa7, d9eeeee06e, 4fcda272fb, ce959ddd3f, 730a3faa02,
91820a8006, 500e299e40, ac4b6cd7f0, 3ab494764f, 5608035074, e083b8011c, 06327fba1e, 06da33b1ed, 2aa6ac5731, b28af9443b,
142035340d, d2cf72e0f1, ae27fbc7f2, 9bd10134dd, 3329da5b72, a24f8c96b3, a691cff0c4, f92b55c745, a9e5b902fd, 5b978269cc,
3dffc7b62c, 968c872d15, e2a770f8de, dc46e96879, 8f3b299a45, 506e6887f3, 1757061272, 6599bb5a46, 3f981ef2b3, 5cff735e02,
f5eede4ce1, 22ee45f3df, 8fd0766754, af7107565a, f02dc0e274, b53537e69b, 0cd6dacb45, a3611516a2, 93f21b188d, b9a7c2db7e,
c62ba79759, 9d029f7337, f1e8afcda9, 9697632888, 69ee2c336c, 1f57c6b1f0, 53a5864944, 5b70881098, 06d273b75a, b382900c5c,
c79bb5a936, 7e0dcfc797, 51ddebdc73, e9f7579091, f387a09535, cf94d3295f, 0a91335e24, 6fd04e38a3, bbaae9223a, 060864d0c1,
395632c874, 0dca63bc7b, 7323d727c9, 68f92ecf08, 39d52f25bf, fb8d0c6ce5, ce867fb583, 04a8fc5138, 479ffe5a0f, 4b48c716b2,
a9137b77f0, 5f3bbdca4f, 7bd137f398, 15a0775a3c, 180bc64cb0, e3320c531d, d77003fb3b, 54ed7529ca, 465c8f714e, 88eb69530a,
36c0742c45, 84bcca9117, d2f3793d15, 000e1471eb, d0ff8ab191, bd177b8cc4, 958ff3f185, 5d8b0e8154, 84490f56b8, cb97768004,
f08a35d6b9, e8adaaf5f7, a63fa76b7b, 102e4c975d, 16a3257ada, 01fdbf3626, 97897aaf9b, 1fc42a681f, fbc8f56eaa, 44280f7c9d,
0fbde48655, 9dcfd28f61, 82dbc3e1ae, 4d478658b5, 89ebe47cd9, 212ea2c25c, 1658d088ab, 346b57cf10, e1dcf83326, b5d9d635eb,
88dd78a69c, 6439b929b3, ba15c14103
@@ -12,9 +12,4 @@ rustflags = [
     "-Wclippy::print_stdout",
     "-Wclippy::print_stderr",
     "-Wclippy::implicit_clone",
-
-    # It seems clippy has made a false positive decision here when upgrading rust toolchain to
-    # nightly-2023-08-07, we do need it to be borrowed mutably.
-    # Allow it for now; try disallow it when the toolchain is upgraded in the future.
-    "-Aclippy::needless_pass_by_ref_mut",
 ]
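A quick way to see whether dropping the `needless_pass_by_ref_mut` allow holds up on the new toolchain is to run clippy over the whole workspace. This is a sketch; the command mirrors the clippy invocation that appears later in this changeset (CONTRIBUTING.md and the Makefile) and assumes a nightly toolchain with clippy installed.

```bash
# Surface the clippy lints configured via rustflags, treating warnings as errors.
cargo clippy --workspace --all-targets -- -D warnings
```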
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 35 lines changed)

@@ -41,13 +41,27 @@ body:
       required: true

   - type: textarea
-    id: what-happened
+    id: reproduce
     attributes:
-      label: What happened?
+      label: Minimal reproduce step
       description: |
-        Tell us what happened and also what you would have expected to
-        happen instead.
-      placeholder: "Describe the bug"
+        Please walk us through and provide steps and details on how
+        to reproduce the issue. If possible, provide scripts that we
+        can run to trigger the bug.
+    validations:
+      required: true
+
+  - type: textarea
+    id: expected-manner
+    attributes:
+      label: What did you expect to see?
+    validations:
+      required: true
+
+  - type: textarea
+    id: actual-manner
+    attributes:
+      label: What did you see instead?
     validations:
       required: true

@@ -72,14 +86,3 @@ body:
         trace. This will be automatically formatted into code, so no
         need for backticks.
       render: bash
-
-  - type: textarea
-    id: reproduce
-    attributes:
-      label: How can we reproduce the bug?
-      description: |
-        Please walk us through and provide steps and details on how
-        to reproduce the issue. If possible, provide scripts that we
-        can run to trigger the bug.
-    validations:
-      required: true
|
|||||||
31
.github/actions/deploy-greptimedb/action.yml
vendored
Normal file
31
.github/actions/deploy-greptimedb/action.yml
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
name: Deploy GreptimeDB cluster
|
||||||
|
description: Deploy GreptimeDB cluster on Kubernetes
|
||||||
|
inputs:
|
||||||
|
aws-ci-test-bucket:
|
||||||
|
description: 'AWS S3 bucket name for testing'
|
||||||
|
required: true
|
||||||
|
aws-region:
|
||||||
|
description: 'AWS region for testing'
|
||||||
|
required: true
|
||||||
|
data-root:
|
||||||
|
description: 'Data root for testing'
|
||||||
|
required: true
|
||||||
|
aws-access-key-id:
|
||||||
|
description: 'AWS access key id for testing'
|
||||||
|
required: true
|
||||||
|
aws-secret-access-key:
|
||||||
|
description: 'AWS secret access key for testing'
|
||||||
|
required: true
|
||||||
|
runs:
|
||||||
|
using: composite
|
||||||
|
steps:
|
||||||
|
- name: Deploy GreptimeDB by Helm
|
||||||
|
shell: bash
|
||||||
|
env:
|
||||||
|
DATA_ROOT: ${{ inputs.data-root }}
|
||||||
|
AWS_CI_TEST_BUCKET: ${{ inputs.aws-ci-test-bucket }}
|
||||||
|
AWS_REGION: ${{ inputs.aws-region }}
|
||||||
|
AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
|
||||||
|
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
|
||||||
|
run: |
|
||||||
|
./.github/scripts/deploy-greptimedb.sh
|
||||||
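Outside of GitHub Actions, the same deployment can be driven directly by exporting the variables this composite action wires up and calling the script it runs. This is a rough sketch; the variable names come from the `env:` block above, while the values shown are placeholders.

```bash
# Local equivalent of the composite action's single step (placeholder values).
export DATA_ROOT="ci-test"
export AWS_CI_TEST_BUCKET="<your-test-bucket>"
export AWS_REGION="<your-region>"
export AWS_ACCESS_KEY_ID="<access-key-id>"
export AWS_SECRET_ACCESS_KEY="<secret-access-key>"
./.github/scripts/deploy-greptimedb.sh
```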
@@ -31,10 +31,12 @@ runs:
          echo "prerelease=false" >> $GITHUB_ENV
          echo "makeLatest=true" >> $GITHUB_ENV
          echo "generateReleaseNotes=false" >> $GITHUB_ENV
+         echo "omitBody=true" >> $GITHUB_ENV
        else
          echo "prerelease=true" >> $GITHUB_ENV
          echo "makeLatest=false" >> $GITHUB_ENV
          echo "generateReleaseNotes=true" >> $GITHUB_ENV
+         echo "omitBody=false" >> $GITHUB_ENV
        fi

    - name: Publish release

@@ -45,6 +47,7 @@ runs:
        makeLatest: ${{ env.makeLatest }}
        tag: ${{ inputs.version }}
        generateReleaseNotes: ${{ env.generateReleaseNotes }}
+       omitBody: ${{ env.omitBody }} # omitBody is true when the release is an official release.
        allowUpdates: true
        artifacts: |
          **/greptime-*/*
|
|||||||
59
.github/actions/sqlness-test/action.yml
vendored
Normal file
59
.github/actions/sqlness-test/action.yml
vendored
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
name: Run sqlness test
|
||||||
|
description: Run sqlness test on GreptimeDB
|
||||||
|
|
||||||
|
inputs:
|
||||||
|
aws-ci-test-bucket:
|
||||||
|
description: 'AWS S3 bucket name for testing'
|
||||||
|
required: true
|
||||||
|
aws-region:
|
||||||
|
description: 'AWS region for testing'
|
||||||
|
required: true
|
||||||
|
data-root:
|
||||||
|
description: 'Data root for testing'
|
||||||
|
required: true
|
||||||
|
aws-access-key-id:
|
||||||
|
description: 'AWS access key id for testing'
|
||||||
|
required: true
|
||||||
|
aws-secret-access-key:
|
||||||
|
description: 'AWS secret access key for testing'
|
||||||
|
required: true
|
||||||
|
|
||||||
|
runs:
|
||||||
|
using: composite
|
||||||
|
steps:
|
||||||
|
- name: Deploy GreptimeDB cluster by Helm
|
||||||
|
uses: ./.github/actions/deploy-greptimedb
|
||||||
|
with:
|
||||||
|
data-root: ${{ inputs.data-root }}
|
||||||
|
aws-ci-test-bucket: ${{ inputs.aws-ci-test-bucket }}
|
||||||
|
aws-region: ${{ inputs.aws-region }}
|
||||||
|
aws-access-key-id: ${{ inputs.aws-access-key-id }}
|
||||||
|
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
|
||||||
|
|
||||||
|
# TODO(zyy17): The following tests will be replaced by the real sqlness test.
|
||||||
|
- name: Run tests on greptimedb cluster
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
mysql -h 127.0.0.1 -P 14002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
|
||||||
|
mysql -h 127.0.0.1 -P 14002 -e "SHOW TABLES;"
|
||||||
|
|
||||||
|
- name: Run tests on greptimedb cluster that uses S3
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
mysql -h 127.0.0.1 -P 24002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
|
||||||
|
mysql -h 127.0.0.1 -P 24002 -e "SHOW TABLES;"
|
||||||
|
|
||||||
|
- name: Run tests on standalone greptimedb
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
mysql -h 127.0.0.1 -P 34002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
|
||||||
|
mysql -h 127.0.0.1 -P 34002 -e "SHOW TABLES;"
|
||||||
|
|
||||||
|
- name: Clean S3 data
|
||||||
|
shell: bash
|
||||||
|
env:
|
||||||
|
AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
|
||||||
|
AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
|
||||||
|
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
|
||||||
|
run: |
|
||||||
|
aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive
|
||||||
.github/scripts/deploy-greptimedb.sh (vendored, new executable file, 172 lines)

@@ -0,0 +1,172 @@
#!/usr/bin/env bash

set -e
set -o pipefail

KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.24.0}"
ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}"
DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default}
GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"

# Create a cluster with 1 control-plane node and 5 workers.
function create_kind_cluster() {
  cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
- role: worker
- role: worker
- role: worker
- role: worker
EOF
}

# Add greptime Helm chart repo.
function add_greptime_chart() {
  helm repo add greptime "$GREPTIME_CHART"
  helm repo update
}

# Deploy an etcd cluster with 3 members.
function deploy_etcd_cluster() {
  local namespace="$1"

  helm install etcd "$ETCD_CHART" \
    --set replicaCount=3 \
    --set auth.rbac.create=false \
    --set auth.rbac.token.enabled=false \
    -n "$namespace"

  # Wait for etcd cluster to be ready.
  kubectl rollout status statefulset/etcd -n "$namespace"
}

# Deploy greptimedb-operator.
function deploy_greptimedb_operator() {
  # Use the latest chart and image.
  helm install greptimedb-operator greptime/greptimedb-operator \
    --set image.tag=latest \
    -n "$DEFAULT_INSTALL_NAMESPACE"

  # Wait for greptimedb-operator to be ready.
  kubectl rollout status deployment/greptimedb-operator -n "$DEFAULT_INSTALL_NAMESPACE"
}

# Deploy greptimedb cluster by using local storage.
# It will expose cluster service ports as '14000', '14001', '14002', '14003' to local access.
function deploy_greptimedb_cluster() {
  local cluster_name=$1
  local install_namespace=$2

  kubectl create ns "$install_namespace"

  deploy_etcd_cluster "$install_namespace"

  helm install "$cluster_name" greptime/greptimedb-cluster \
    --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
    --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
    -n "$install_namespace"

  # Wait for greptimedb cluster to be ready.
  while true; do
    PHASE=$(kubectl -n "$install_namespace" get gtc "$cluster_name" -o jsonpath='{.status.clusterPhase}')
    if [ "$PHASE" == "Running" ]; then
      echo "Cluster is ready"
      break
    else
      echo "Cluster is not ready yet: Current phase: $PHASE"
      sleep 5 # wait for 5 seconds before check again.
    fi
  done

  # Expose greptimedb cluster to local access.
  kubectl -n "$install_namespace" port-forward svc/"$cluster_name"-frontend \
    14000:4000 \
    14001:4001 \
    14002:4002 \
    14003:4003 > /tmp/connections.out &
}

# Deploy greptimedb cluster by using S3.
# It will expose cluster service ports as '24000', '24001', '24002', '24003' to local access.
function deploy_greptimedb_cluster_with_s3_storage() {
  local cluster_name=$1
  local install_namespace=$2

  kubectl create ns "$install_namespace"

  deploy_etcd_cluster "$install_namespace"

  helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
    --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
    --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
    --set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
    --set storage.s3.region="$AWS_REGION" \
    --set storage.s3.root="$DATA_ROOT" \
    --set storage.s3.secretName=s3-credentials \
    --set storage.credentials.secretName=s3-credentials \
    --set storage.credentials.secretCreation.enabled=true \
    --set storage.credentials.secretCreation.enableEncryption=false \
    --set storage.credentials.secretCreation.data.access-key-id="$AWS_ACCESS_KEY_ID" \
    --set storage.credentials.secretCreation.data.secret-access-key="$AWS_SECRET_ACCESS_KEY"

  # Wait for greptimedb cluster to be ready.
  while true; do
    PHASE=$(kubectl -n "$install_namespace" get gtc "$cluster_name" -o jsonpath='{.status.clusterPhase}')
    if [ "$PHASE" == "Running" ]; then
      echo "Cluster is ready"
      break
    else
      echo "Cluster is not ready yet: Current phase: $PHASE"
      sleep 5 # wait for 5 seconds before check again.
    fi
  done

  # Expose greptimedb cluster to local access.
  kubectl -n "$install_namespace" port-forward svc/"$cluster_name"-frontend \
    24000:4000 \
    24001:4001 \
    24002:4002 \
    24003:4003 > /tmp/connections.out &
}

# Deploy standalone greptimedb.
# It will expose cluster service ports as '34000', '34001', '34002', '34003' to local access.
function deploy_standalone_greptimedb() {
  helm install greptimedb-standalone greptime/greptimedb-standalone \
    --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
    -n "$DEFAULT_INSTALL_NAMESPACE"

  # Wait for etcd cluster to be ready.
  kubectl rollout status statefulset/greptimedb-standalone -n "$DEFAULT_INSTALL_NAMESPACE"

  # Expose greptimedb to local access.
  kubectl -n "$DEFAULT_INSTALL_NAMESPACE" port-forward svc/greptimedb-standalone \
    34000:4000 \
    34001:4001 \
    34002:4002 \
    34003:4003 > /tmp/connections.out &
}

# Entrypoint of the script.
function main() {
  create_kind_cluster
  add_greptime_chart

  # Deploy standalone greptimedb in the same K8s.
  if [ "$ENABLE_STANDALONE_MODE" == "true" ]; then
    deploy_standalone_greptimedb
  fi

  deploy_greptimedb_operator
  deploy_greptimedb_cluster testcluster testcluster
  deploy_greptimedb_cluster_with_s3_storage testcluster-s3 testcluster-s3
}

# Usages:
# - Deploy greptimedb cluster: ./deploy-greptimedb.sh
main
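A minimal local run of this script could look like the following sketch. It assumes kind, helm, kubectl, and a MySQL client are installed; the environment variable names are the ones defined at the top of the script, and the port mapping comes from the port-forward calls above.

```bash
# Run the deploy script with its documented defaults spelled out explicitly.
KUBERNETES_VERSION=v1.24.0 \
GREPTIMEDB_IMAGE_TAG=latest \
ENABLE_STANDALONE_MODE=true \
./.github/scripts/deploy-greptimedb.sh

# The frontend service of the local-storage cluster is port-forwarded by the
# script, so its MySQL protocol port (4002) is reachable on 14002.
mysql -h 127.0.0.1 -P 14002 -e "SHOW TABLES;"
```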
.github/workflows/apidoc.yml (vendored, 2 lines changed)

@@ -13,7 +13,7 @@ on:
 name: Build API docs

 env:
-  RUST_TOOLCHAIN: nightly-2023-08-07
+  RUST_TOOLCHAIN: nightly-2023-10-21

 jobs:
   apidoc:
|||||||
17
.github/workflows/develop.yml
vendored
17
.github/workflows/develop.yml
vendored
@@ -29,7 +29,7 @@ concurrency:
|
|||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
|
|
||||||
env:
|
env:
|
||||||
RUST_TOOLCHAIN: nightly-2023-08-07
|
RUST_TOOLCHAIN: nightly-2023-10-21
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
typos:
|
typos:
|
||||||
@@ -42,7 +42,10 @@ jobs:
|
|||||||
check:
|
check:
|
||||||
name: Check
|
name: Check
|
||||||
if: github.event.pull_request.draft == false
|
if: github.event.pull_request.draft == false
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ${{ matrix.os }}
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
os: [ windows-latest-8-cores, ubuntu-20.04 ]
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
@@ -161,15 +164,18 @@ jobs:
|
|||||||
uses: Swatinem/rust-cache@v2
|
uses: Swatinem/rust-cache@v2
|
||||||
- name: Install latest nextest release
|
- name: Install latest nextest release
|
||||||
uses: taiki-e/install-action@nextest
|
uses: taiki-e/install-action@nextest
|
||||||
|
- name: Install cargo-llvm-cov
|
||||||
|
uses: taiki-e/install-action@cargo-llvm-cov
|
||||||
- name: Install Python
|
- name: Install Python
|
||||||
uses: actions/setup-python@v4
|
uses: actions/setup-python@v4
|
||||||
with:
|
with:
|
||||||
python-version: '3.10'
|
python-version: '3.10'
|
||||||
- name: Install PyArrow Package
|
- name: Install PyArrow Package
|
||||||
run: pip install pyarrow
|
run: pip install pyarrow
|
||||||
- name: Install cargo-llvm-cov
|
- name: Setup etcd server
|
||||||
uses: taiki-e/install-action@cargo-llvm-cov
|
working-directory: tests-integration/fixtures/etcd
|
||||||
- name: Collect coverage data
|
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||||
|
- name: Run nextest cases
|
||||||
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
|
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
|
||||||
env:
|
env:
|
||||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
|
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
|
||||||
@@ -179,6 +185,7 @@ jobs:
|
|||||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
|
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
|
||||||
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
|
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
|
||||||
GT_S3_REGION: ${{ secrets.S3_REGION }}
|
GT_S3_REGION: ${{ secrets.S3_REGION }}
|
||||||
|
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
|
||||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||||
- name: Codecov upload
|
- name: Codecov upload
|
||||||
uses: codecov/codecov-action@v2
|
uses: codecov/codecov-action@v2
|
||||||
|
|||||||
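A rough local reproduction of the new test steps is sketched below. It assumes Docker, cargo-llvm-cov, and cargo-nextest are installed; the compose file path, the nextest invocation, and the GT_ETCD_ENDPOINTS variable are taken from the workflow above.

```bash
# Start the etcd fixture used by the integration tests.
(cd tests-integration/fixtures/etcd && docker compose -f docker-compose-standalone.yml up -d --wait)

# Run the same nextest invocation as CI, pointing the tests at the local etcd.
export GT_ETCD_ENDPOINTS=http://127.0.0.1:2379
cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
```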
.github/workflows/license.yaml (vendored, 2 lines changed)

@@ -13,4 +13,4 @@ jobs:
     steps:
       - uses: actions/checkout@v2
       - name: Check License Header
-        uses: apache/skywalking-eyes/header@df70871af1a8109c9a5b1dc824faaf65246c5236
+        uses: korandoru/hawkeye@v3
.github/workflows/nightly-ci.yml (vendored, 18 lines changed)

@@ -12,7 +12,7 @@ concurrency:
   cancel-in-progress: true

 env:
-  RUST_TOOLCHAIN: nightly-2023-08-07
+  RUST_TOOLCHAIN: nightly-2023-10-21

 jobs:
   sqlness:

@@ -34,6 +34,14 @@ jobs:
         uses: Swatinem/rust-cache@v2
       - name: Run sqlness
         run: cargo sqlness
+      - name: Notify slack if failed
+        if: failure()
+        uses: slackapi/slack-github-action@v1.23.0
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
+        with:
+          payload: |
+            {"text": "Nightly CI failed for sqlness tests"}
       - name: Upload sqlness logs
         if: always()
         uses: actions/upload-artifact@v3

@@ -80,3 +88,11 @@ jobs:
           GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
           GT_S3_REGION: ${{ secrets.S3_REGION }}
           UNITTEST_LOG_DIR: "__unittest_logs"
+      - name: Notify slack if failed
+        if: failure()
+        uses: slackapi/slack-github-action@v1.23.0
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
+        with:
+          payload: |
+            {"text": "Nightly CI failed for cargo test"}
26
.github/workflows/nightly-funtional-tests.yml
vendored
Normal file
26
.github/workflows/nightly-funtional-tests.yml
vendored
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
name: Nightly functional tests
|
||||||
|
|
||||||
|
on:
|
||||||
|
schedule:
|
||||||
|
# At 00:00 on Tuesday.
|
||||||
|
- cron: '0 0 * * 2'
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
sqlness-test:
|
||||||
|
name: Run sqlness test
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Run sqlness test
|
||||||
|
uses: ./.github/actions/sqlness-test
|
||||||
|
with:
|
||||||
|
data-root: sqlness-test
|
||||||
|
aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||||
|
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||||
|
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||||
|
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||||
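Because the workflow also declares `workflow_dispatch`, it can be triggered manually outside the Tuesday schedule. A hedged example using the GitHub CLI, assuming `gh` is installed and authenticated against the repository:

```bash
# Manually dispatch the nightly functional tests from a checkout of the repo.
gh workflow run nightly-funtional-tests.yml
```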
.github/workflows/release.yml (vendored, 11 lines changed)

@@ -82,7 +82,7 @@ on:
 # Use env variables to control all the release process.
 env:
   # The arguments of building greptime.
-  RUST_TOOLCHAIN: nightly-2023-08-07
+  RUST_TOOLCHAIN: nightly-2023-10-21
   CARGO_PROFILE: nightly

   # Controls whether to run tests, include unit-test, integration-test and sqlness.

@@ -302,8 +302,12 @@ jobs:
   release-cn-artifacts:
     name: Release artifacts to CN region
     if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
-    needs: [
+    needs: [ # The job has to wait until all the artifacts are built.
       allocate-runners,
+      build-linux-amd64-artifacts,
+      build-linux-arm64-artifacts,
+      build-macos-artifacts,
+      build-windows-artifacts,
       release-images-to-dockerhub,
     ]
     runs-on: ubuntu-20.04

@@ -338,11 +342,12 @@ jobs:
   publish-github-release:
     name: Create GitHub release and upload artifacts
     if: ${{ inputs.publish_github_release || github.event_name == 'push' || github.event_name == 'schedule' }}
-    needs: [
+    needs: [ # The job has to wait until all the artifacts are built.
       allocate-runners,
       build-linux-amd64-artifacts,
       build-linux-arm64-artifacts,
       build-macos-artifacts,
+      build-windows-artifacts,
       release-images-to-dockerhub,
     ]
     runs-on: ubuntu-20.04
.github/workflows/size-label.yml (vendored, new file, 26 lines)

@@ -0,0 +1,26 @@
name: size-labeler

on: [pull_request]

jobs:
  labeler:
    runs-on: ubuntu-latest
    name: Label the PR size
    steps:
      - uses: codelytv/pr-size-labeler@v1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          s_label: 'Size: S'
          s_max_size: '100'
          m_label: 'Size: M'
          m_max_size: '500'
          l_label: 'Size: L'
          l_max_size: '1000'
          xl_label: 'Size: XL'
          fail_if_xl: 'false'
          message_if_xl: >
            This PR exceeds the recommended size of 1000 lines.
            Please make sure you are NOT addressing multiple issues with one PR.
            Note this PR might be rejected due to its size.
          github_api_url: 'api.github.com'
          files_to_ignore: 'Cargo.lock'
@@ -1,14 +0,0 @@
-header:
-  license:
-    spdx-id: Apache-2.0
-    copyright-owner: Greptime Team
-
-  paths:
-    - "**/*.rs"
-    - "**/*.py"
-
-  comment: on-failure
-
-  dependency:
-    files:
-      - Cargo.toml
@@ -2,7 +2,7 @@

 Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.

-Read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
+Please read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.

 Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).

@@ -21,7 +21,7 @@ Pull requests are great, but we accept all kinds of other help if you like. Such
 - Write tutorials or blog posts. Blog, speak about, or create tutorials about one of GreptimeDB's many features. Mention [@greptime](https://twitter.com/greptime) on Twitter and email info@greptime.com so we can give pointers and tips and help you spread the word by promoting your content on Greptime communication channels.
 - Improve the documentation. [Submit documentation](http://github.com/greptimeTeam/docs/) updates, enhancements, designs, or bug fixes, and fixing any spelling or grammar errors will be very much appreciated.
 - Present at meetups and conferences about your GreptimeDB projects. Your unique challenges and successes in building things with GreptimeDB can provide great speaking material. We'd love to review your talk abstract, so get in touch with us if you'd like some help!
-- Submit bug reports. To report a bug or a security issue, you can [open a new GitHub issue](https://github.com/GrepTimeTeam/greptimedb/issues/new).
+- Submitting bug reports. To report a bug or a security issue, you can [open a new GitHub issue](https://github.com/GrepTimeTeam/greptimedb/issues/new).
 - Speak up feature requests. Send feedback is a great way for us to understand your different use cases of GreptimeDB better. If you want to share your experience with GreptimeDB, or if you want to discuss any ideas, you can start a discussion on [GitHub discussions](https://github.com/GreptimeTeam/greptimedb/discussions), chat with the Greptime team on [Slack](https://greptime.com/slack), or you can tweet [@greptime](https://twitter.com/greptime) on Twitter.

 ## Code of Conduct

@@ -49,6 +49,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
 ### Before PR

 - To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
+- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
 - Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
 - Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
 - Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).

@@ -81,7 +82,7 @@ Now, `pre-commit` will run automatically on `git commit`.
 ### Title

 The titles of pull requests should be prefixed with category names listed in [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0)
-like `feat`/`fix`/`docs`, with a concise summary of code change following. DO NOT use last commit message as pull request title.
+like `feat`/`fix`/`docs`, with a concise summary of code change following. AVOID using the last commit message as pull request title.

 ### Description

@@ -100,7 +101,7 @@ of what you were trying to do and what went wrong. You can also reach for help i

 ## Community

-The core team will be thrilled if you participate in any way you like. When you are stuck, try ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
+The core team will be thrilled if you would like to participate in any way you like. When you are stuck, try to ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:

 - [GreptimeDB Community Slack](https://greptime.com/slack)
 - [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
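Taken together, the pre-PR checks referenced or added in this section can be run locally roughly as follows. This is a sketch that assumes Docker is available and that the commands are run from the project root; every command is the one quoted in the contribution guidelines above.

```bash
# Format license headers with hawkeye (the new license-header tool).
docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format

# Run the unit tests, either with cargo test or nextest.
cargo test --workspace
# or: cargo nextest run

# Check that clippy reports no warnings.
cargo clippy --workspace --all-targets -- -D warnings
```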
Cargo.lock (generated, 3698 lines changed): file diff suppressed because it is too large.
Cargo.toml (65 lines changed)

@@ -27,6 +27,7 @@ members = [
     "src/common/telemetry",
     "src/common/test-util",
     "src/common/time",
+    "src/common/decimal",
     "src/common/version",
     "src/datanode",
     "src/datatypes",

@@ -35,75 +36,92 @@ members = [
     "src/log-store",
     "src/meta-client",
     "src/meta-srv",
+    "src/metric-engine",
     "src/mito2",
     "src/object-store",
     "src/operator",
     "src/partition",
     "src/plugins",
     "src/promql",
+    "src/puffin",
     "src/query",
     "src/script",
     "src/servers",
     "src/session",
     "src/sql",
-    "src/storage",
     "src/store-api",
     "src/table",
+    "src/index",
     "tests-integration",
     "tests/runner",
 ]
 resolver = "2"

 [workspace.package]
-version = "0.4.1"
+version = "0.4.4"
 edition = "2021"
 license = "Apache-2.0"

 [workspace.dependencies]
+ahash = { version = "0.8", features = ["compile-time-rng"] }
 aquamarine = "0.3"
-arrow = { version = "43.0" }
-arrow-array = "43.0"
-arrow-flight = "43.0"
-arrow-schema = { version = "43.0", features = ["serde"] }
+arrow = { version = "47.0" }
+arrow-array = "47.0"
+arrow-flight = "47.0"
+arrow-schema = { version = "47.0", features = ["serde"] }
 async-stream = "0.3"
 async-trait = "0.1"
+base64 = "0.21"
+bigdecimal = "0.4.2"
+bitflags = "2.4.1"
+bytemuck = "1.12"
 chrono = { version = "0.4", features = ["serde"] }
-datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
-datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
-datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
-datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
-datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
-datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
-datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
 derive_builder = "0.12"
-etcd-client = "0.11"
+etcd-client = "0.12"
+fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "1f1dd532a111e3834cc3019c5605e2993ffb9dc3" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "b1d403088f02136bcebde53d604f491c260ca8e2" }
 humantime-serde = "1.1"
 itertools = "0.10"
 lazy_static = "1.4"
 meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
-metrics = "0.20"
+mockall = "0.11.4"
 moka = "0.12"
 once_cell = "1.18"
-opentelemetry-proto = { version = "0.2", features = ["gen-tonic", "metrics"] }
-parquet = "43.0"
+opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
+  "gen-tonic",
+  "metrics",
+  "trace",
+] }
+parquet = "47.0"
 paste = "1.0"
-prost = "0.11"
+pin-project = "1.0"
+prometheus = { version = "0.13.3", features = ["process"] }
+prost = "0.12"
 raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
 rand = "0.8"
 regex = "1.8"
+regex-automata = { version = "0.1", features = ["transducer"] }
 reqwest = { version = "0.11", default-features = false, features = [
     "json",
     "rustls-tls-native-roots",
     "stream",
 ] }
+rust_decimal = "1.33"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 smallvec = "1"
-snafu = { version = "0.7", features = ["backtraces"] }
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6cf9d23d5b8fbecd65efc1d9afb7e80ad7a424da", features = [
+snafu = "0.7"
+# on branch v0.38.x
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6a93567ae38d42be5c8d08b13c8ff4dde26502ef", features = [
     "visitor",
 ] }
 strum = { version = "0.25", features = ["derive"] }

@@ -111,8 +129,9 @@ tempfile = "3"
 tokio = { version = "1.28", features = ["full"] }
 tokio-util = { version = "0.7", features = ["io-util", "compat"] }
 toml = "0.7"
-tonic = { version = "0.9", features = ["tls"] }
+tonic = { version = "0.10", features = ["tls"] }
 uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }

 ## workspaces members
 api = { path = "src/api" }
 auth = { path = "src/auth" }

@@ -123,6 +142,7 @@ common-base = { path = "src/common/base" }
 common-catalog = { path = "src/common/catalog" }
 common-config = { path = "src/common/config" }
 common-datasource = { path = "src/common/datasource" }
+common-decimal = { path = "src/common/decimal" }
 common-error = { path = "src/common/error" }
 common-function = { path = "src/common/function" }
 common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }

@@ -160,7 +180,6 @@ script = { path = "src/script" }
 servers = { path = "src/servers" }
 session = { path = "src/session" }
 sql = { path = "src/sql" }
-storage = { path = "src/storage" }
 store-api = { path = "src/store-api" }
 substrait = { path = "src/common/substrait" }
 table = { path = "src/table" }
LICENSE (2 lines changed)

@@ -186,7 +186,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.

-   Copyright 2022 Greptime Team
+   Copyright [yyyy] [name of copyright owner]

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
Makefile (4 lines changed)

@@ -157,11 +157,11 @@ sqlness-test: ## Run sqlness test.

 .PHONY: check
 check: ## Cargo check all the targets.
-	cargo check --workspace --all-targets
+	cargo check --workspace --all-targets --all-features

 .PHONY: clippy
 clippy: ## Check clippy rules.
-	cargo clippy --workspace --all-targets -F pyo3_backend -- -D warnings
+	cargo clippy --workspace --all-targets --all-features -- -D warnings

 .PHONY: fmt-check
 fmt-check: ## Check code format.
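With these changes both targets now exercise every feature flag rather than a hand-picked set. A quick sketch of invoking them from the repository root (assuming make and a nightly Rust toolchain are installed):

```bash
# Runs `cargo check --workspace --all-targets --all-features` via the updated target.
make check

# Runs `cargo clippy --workspace --all-targets --all-features -- -D warnings`.
make clippy
```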
README.md (17 lines changed)

@@ -27,14 +27,6 @@
 <a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
 </p>

-## Upcoming Event
-Come and meet us in **KubeCon + CloudNativeCon North America 2023!**
-<p align="center">
-  <picture>
-    <img alt="KubeCon + CloudNativeCon North Logo" src="./docs/banner/KCCNC_NA_2023_1000x200_Email Banner.png" width="800px">
-  </picture>
-</p>
-
 ## What is GreptimeDB

 GreptimeDB is an open-source time-series database with a special focus on

@@ -108,7 +100,7 @@ Please see the online document site for more installation options and [operation

 ### Get started

-Read the [complete getting started guide](https://docs.greptime.com/getting-started/try-out-greptimedb) on our [official document site](https://docs.greptime.com/).
+Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview) on our [official document site](https://docs.greptime.com/).

 To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).

@@ -117,7 +109,7 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c
 ### Installation

 - [Pre-built Binaries](https://greptime.com/download):
   For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
   In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version.
   We recommend using virtualenv for the installation process to manage multiple Python versions.
 - [Docker Images](https://hub.docker.com/r/greptime/greptimedb)(**recommended**): pre-built

@@ -143,6 +135,7 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c
 - [GreptimeDB Java Client](https://github.com/GreptimeTeam/greptimedb-client-java)
 - [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
 - [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)
+- [GreptimeDB JavaScript Client](https://github.com/GreptimeTeam/greptime-js-sdk)

 ## Project Status

@@ -184,6 +177,6 @@ Please refer to [contribution guidelines](CONTRIBUTING.md) for more information.
 ## Acknowledgement
 - GreptimeDB uses [Apache Arrow](https://arrow.apache.org/) as the memory model and [Apache Parquet](https://parquet.apache.org/) as the persistent file format.
 - GreptimeDB's query engine is powered by [Apache Arrow DataFusion](https://github.com/apache/arrow-datafusion).
-- [OpenDAL](https://github.com/datafuselabs/opendal) from [Datafuse Labs](https://github.com/datafuselabs) gives GreptimeDB a very general and elegant data access abstraction layer.
-- GreptimeDB’s meta service is based on [etcd](https://etcd.io/).
+- [Apache OpenDAL (incubating)](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
+- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
 - GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.
@@ -8,7 +8,7 @@ license.workspace = true
 arrow.workspace = true
 chrono.workspace = true
 clap = { version = "4.0", features = ["derive"] }
-client = { workspace = true }
+client.workspace = true
 futures-util.workspace = true
 indicatif = "0.17.1"
 itertools.workspace = true
@@ -152,6 +152,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
                 .unwrap_or_default(),
             datatype: datatype.into(),
             semantic_type: semantic_type as i32,
+            ..Default::default()
         };
         columns.push(column);
     }

@@ -266,6 +267,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Tag as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "tpep_pickup_datetime".to_string(),

@@ -274,6 +276,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Timestamp as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "tpep_dropoff_datetime".to_string(),

@@ -282,6 +285,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "passenger_count".to_string(),

@@ -290,6 +294,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "trip_distance".to_string(),

@@ -298,6 +303,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "RatecodeID".to_string(),

@@ -306,6 +312,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "store_and_fwd_flag".to_string(),

@@ -314,6 +321,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "PULocationID".to_string(),

@@ -322,6 +330,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "DOLocationID".to_string(),

@@ -330,6 +339,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "payment_type".to_string(),

@@ -338,6 +348,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "fare_amount".to_string(),

@@ -346,6 +357,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
@@ -346,6 +357,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
|||||||
default_constraint: vec![],
|
default_constraint: vec![],
|
||||||
semantic_type: SemanticType::Field as i32,
|
semantic_type: SemanticType::Field as i32,
|
||||||
comment: String::new(),
|
comment: String::new(),
|
||||||
|
..Default::default()
|
||||||
},
|
},
|
||||||
ColumnDef {
|
ColumnDef {
|
||||||
name: "extra".to_string(),
|
name: "extra".to_string(),
|
||||||
@@ -354,6 +366,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
|||||||
default_constraint: vec![],
|
default_constraint: vec![],
|
||||||
semantic_type: SemanticType::Field as i32,
|
semantic_type: SemanticType::Field as i32,
|
||||||
comment: String::new(),
|
comment: String::new(),
|
||||||
|
..Default::default()
|
||||||
},
|
},
|
||||||
ColumnDef {
|
ColumnDef {
|
||||||
name: "mta_tax".to_string(),
|
name: "mta_tax".to_string(),
|
||||||
@@ -362,6 +375,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
|||||||
default_constraint: vec![],
|
default_constraint: vec![],
|
||||||
semantic_type: SemanticType::Field as i32,
|
semantic_type: SemanticType::Field as i32,
|
||||||
comment: String::new(),
|
comment: String::new(),
|
||||||
|
..Default::default()
|
||||||
},
|
},
|
||||||
ColumnDef {
|
ColumnDef {
|
||||||
name: "tip_amount".to_string(),
|
name: "tip_amount".to_string(),
|
||||||
@@ -370,6 +384,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
|||||||
default_constraint: vec![],
|
default_constraint: vec![],
|
||||||
semantic_type: SemanticType::Field as i32,
|
semantic_type: SemanticType::Field as i32,
|
||||||
comment: String::new(),
|
comment: String::new(),
|
||||||
|
..Default::default()
|
||||||
},
|
},
|
||||||
ColumnDef {
|
ColumnDef {
|
||||||
name: "tolls_amount".to_string(),
|
name: "tolls_amount".to_string(),
|
||||||
@@ -378,6 +393,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
|||||||
default_constraint: vec![],
|
default_constraint: vec![],
|
||||||
semantic_type: SemanticType::Field as i32,
|
semantic_type: SemanticType::Field as i32,
|
||||||
comment: String::new(),
|
comment: String::new(),
|
||||||
|
..Default::default()
|
||||||
},
|
},
|
||||||
ColumnDef {
|
ColumnDef {
|
||||||
name: "improvement_surcharge".to_string(),
|
name: "improvement_surcharge".to_string(),
|
||||||
@@ -386,6 +402,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
|||||||
default_constraint: vec![],
|
default_constraint: vec![],
|
||||||
semantic_type: SemanticType::Field as i32,
|
semantic_type: SemanticType::Field as i32,
|
||||||
comment: String::new(),
|
comment: String::new(),
|
||||||
|
..Default::default()
|
||||||
},
|
},
|
||||||
ColumnDef {
|
ColumnDef {
|
||||||
name: "total_amount".to_string(),
|
name: "total_amount".to_string(),
|
||||||
@@ -394,6 +411,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
|||||||
default_constraint: vec![],
|
default_constraint: vec![],
|
||||||
semantic_type: SemanticType::Field as i32,
|
semantic_type: SemanticType::Field as i32,
|
||||||
comment: String::new(),
|
comment: String::new(),
|
||||||
|
..Default::default()
|
||||||
},
|
},
|
||||||
ColumnDef {
|
ColumnDef {
|
||||||
name: "congestion_surcharge".to_string(),
|
name: "congestion_surcharge".to_string(),
|
||||||
@@ -402,6 +420,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
|||||||
default_constraint: vec![],
|
default_constraint: vec![],
|
||||||
semantic_type: SemanticType::Field as i32,
|
semantic_type: SemanticType::Field as i32,
|
||||||
comment: String::new(),
|
comment: String::new(),
|
||||||
|
..Default::default()
|
||||||
},
|
},
|
||||||
ColumnDef {
|
ColumnDef {
|
||||||
name: "airport_fee".to_string(),
|
name: "airport_fee".to_string(),
|
||||||
@@ -410,6 +429,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
|||||||
default_constraint: vec![],
|
default_constraint: vec![],
|
||||||
semantic_type: SemanticType::Field as i32,
|
semantic_type: SemanticType::Field as i32,
|
||||||
comment: String::new(),
|
comment: String::new(),
|
||||||
|
..Default::default()
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
time_index: "tpep_pickup_datetime".to_string(),
|
time_index: "tpep_pickup_datetime".to_string(),
|
||||||
|
|||||||
@@ -53,33 +53,6 @@ type = "File"
|
|||||||
# The local file cache capacity in bytes.
|
# The local file cache capacity in bytes.
|
||||||
# cache_capacity = "256MB"
|
# cache_capacity = "256MB"
|
||||||
|
|
||||||
# Compaction options, see `standalone.example.toml`.
|
|
||||||
[storage.compaction]
|
|
||||||
max_inflight_tasks = 4
|
|
||||||
max_files_in_level0 = 8
|
|
||||||
max_purge_tasks = 32
|
|
||||||
|
|
||||||
# Storage manifest options
|
|
||||||
[storage.manifest]
|
|
||||||
# Region checkpoint actions margin.
|
|
||||||
# Create a checkpoint every <checkpoint_margin> actions.
|
|
||||||
checkpoint_margin = 10
|
|
||||||
# Region manifest logs and checkpoints gc execution duration
|
|
||||||
gc_duration = '10m'
|
|
||||||
|
|
||||||
# Storage flush options
|
|
||||||
[storage.flush]
|
|
||||||
# Max inflight flush tasks.
|
|
||||||
max_flush_tasks = 8
|
|
||||||
# Default write buffer size for a region.
|
|
||||||
region_write_buffer_size = "32MB"
|
|
||||||
# Interval to check whether a region needs flush.
|
|
||||||
picker_schedule_interval = "5m"
|
|
||||||
# Interval to auto flush a region if it has not flushed yet.
|
|
||||||
auto_flush_interval = "1h"
|
|
||||||
# Global write buffer size for all regions.
|
|
||||||
global_write_buffer_size = "1GB"
|
|
||||||
|
|
||||||
# Mito engine options
|
# Mito engine options
|
||||||
[[region_engine]]
|
[[region_engine]]
|
||||||
[region_engine.mito]
|
[region_engine.mito]
|
||||||
@@ -91,8 +64,8 @@ worker_channel_size = 128
|
|||||||
worker_request_batch_size = 64
|
worker_request_batch_size = 64
|
||||||
# Number of meta actions to accumulate before triggering a new checkpoint for the manifest
|
# Number of meta actions to accumulate before triggering a new checkpoint for the manifest
|
||||||
manifest_checkpoint_distance = 10
|
manifest_checkpoint_distance = 10
|
||||||
# Manifest compression type
|
# Whether to compress manifest and checkpoint file by gzip (default false).
|
||||||
manifest_compress_type = "Uncompressed"
|
compress_manifest = false
|
||||||
# Max number of running background jobs
|
# Max number of running background jobs
|
||||||
max_background_jobs = 4
|
max_background_jobs = 4
|
||||||
# Interval to auto flush a region if it has not flushed yet.
|
# Interval to auto flush a region if it has not flushed yet.
|
||||||
@@ -101,10 +74,16 @@ auto_flush_interval = "1h"
|
|||||||
global_write_buffer_size = "1GB"
|
global_write_buffer_size = "1GB"
|
||||||
# Global write buffer size threshold to reject write requests (default 2G).
|
# Global write buffer size threshold to reject write requests (default 2G).
|
||||||
global_write_buffer_reject_size = "2GB"
|
global_write_buffer_reject_size = "2GB"
|
||||||
|
# Cache size for SST metadata (default 128MB). Setting it to 0 to disable the cache.
|
||||||
|
sst_meta_cache_size = "128MB"
|
||||||
|
# Cache size for vectors and arrow arrays (default 512MB). Setting it to 0 to disable the cache.
|
||||||
|
vector_cache_size = "512MB"
|
||||||
|
# Cache size for pages of SST row groups (default 512MB). Setting it to 0 to disable the cache.
|
||||||
|
page_cache_size = "512MB"
|
||||||
|
# Buffer size for SST writing.
|
||||||
|
sst_write_buffer_size = "8MB"
|
||||||
|
|
||||||
# Log options
|
# Log options, see `standalone.example.toml`
|
||||||
# [logging]
|
# [logging]
|
||||||
# Specify logs directory.
|
|
||||||
# dir = "/tmp/greptimedb/logs"
|
# dir = "/tmp/greptimedb/logs"
|
||||||
# Specify the log level [info | debug | error | warn]
|
|
||||||
# level = "info"
|
# level = "info"
|
||||||
|
|||||||
@@ -28,6 +28,13 @@ max_retry_times = 12
|
|||||||
# Initial retry delay of procedures, increases exponentially
|
# Initial retry delay of procedures, increases exponentially
|
||||||
retry_delay = "500ms"
|
retry_delay = "500ms"
|
||||||
|
|
||||||
|
# Failure detectors options.
|
||||||
|
[failure_detector]
|
||||||
|
threshold = 8.0
|
||||||
|
min_std_deviation = "100ms"
|
||||||
|
acceptable_heartbeat_pause = "3000ms"
|
||||||
|
first_heartbeat_estimate = "1000ms"
|
||||||
|
|
||||||
# # Datanode options.
|
# # Datanode options.
|
||||||
# [datanode]
|
# [datanode]
|
||||||
# # Datanode client options.
|
# # Datanode client options.
|
||||||
|
|||||||
@@ -122,35 +122,35 @@ type = "File"
|
|||||||
# The local file cache capacity in bytes.
|
# The local file cache capacity in bytes.
|
||||||
# cache_capacity = "256MB"
|
# cache_capacity = "256MB"
|
||||||
|
|
||||||
# Compaction options.
|
# Mito engine options
|
||||||
[storage.compaction]
|
[[region_engine]]
|
||||||
# Max task number that can concurrently run.
|
[region_engine.mito]
|
||||||
max_inflight_tasks = 4
|
# Number of region workers
|
||||||
# Max files in level 0 to trigger compaction.
|
num_workers = 8
|
||||||
max_files_in_level0 = 8
|
# Request channel size of each worker
|
||||||
# Max task number for SST purge task after compaction.
|
worker_channel_size = 128
|
||||||
max_purge_tasks = 32
|
# Max batch size for a worker to handle requests
|
||||||
|
worker_request_batch_size = 64
|
||||||
# Storage manifest options
|
# Number of meta action updated to trigger a new checkpoint for the manifest
|
||||||
[storage.manifest]
|
manifest_checkpoint_distance = 10
|
||||||
# Region checkpoint actions margin.
|
# Whether to compress manifest and checkpoint file by gzip (default false).
|
||||||
# Create a checkpoint every <checkpoint_margin> actions.
|
compress_manifest = false
|
||||||
checkpoint_margin = 10
|
# Max number of running background jobs
|
||||||
# Region manifest logs and checkpoints gc execution duration
|
max_background_jobs = 4
|
||||||
gc_duration = '10m'
|
|
||||||
|
|
||||||
# Storage flush options
|
|
||||||
[storage.flush]
|
|
||||||
# Max inflight flush tasks.
|
|
||||||
max_flush_tasks = 8
|
|
||||||
# Default write buffer size for a region.
|
|
||||||
region_write_buffer_size = "32MB"
|
|
||||||
# Interval to check whether a region needs flush.
|
|
||||||
picker_schedule_interval = "5m"
|
|
||||||
# Interval to auto flush a region if it has not flushed yet.
|
# Interval to auto flush a region if it has not flushed yet.
|
||||||
auto_flush_interval = "1h"
|
auto_flush_interval = "1h"
|
||||||
# Global write buffer size for all regions.
|
# Global write buffer size for all regions.
|
||||||
global_write_buffer_size = "1GB"
|
global_write_buffer_size = "1GB"
|
||||||
|
# Global write buffer size threshold to reject write requests (default 2G).
|
||||||
|
global_write_buffer_reject_size = "2GB"
|
||||||
|
# Cache size for SST metadata (default 128MB). Setting it to 0 to disable the cache.
|
||||||
|
sst_meta_cache_size = "128MB"
|
||||||
|
# Cache size for vectors and arrow arrays (default 512MB). Setting it to 0 to disable the cache.
|
||||||
|
vector_cache_size = "512MB"
|
||||||
|
# Cache size for pages of SST row groups (default 512MB). Setting it to 0 to disable the cache.
|
||||||
|
page_cache_size = "512MB"
|
||||||
|
# Buffer size for SST writing.
|
||||||
|
sst_write_buffer_size = "8MB"
|
||||||
|
|
||||||
# Log options
|
# Log options
|
||||||
# [logging]
|
# [logging]
|
||||||
@@ -158,3 +158,9 @@ global_write_buffer_size = "1GB"
|
|||||||
# dir = "/tmp/greptimedb/logs"
|
# dir = "/tmp/greptimedb/logs"
|
||||||
# Specify the log level [info | debug | error | warn]
|
# Specify the log level [info | debug | error | warn]
|
||||||
# level = "info"
|
# level = "info"
|
||||||
|
# Whether to enable tracing; default is false.
# enable_otlp_tracing = false
# The tracing exporter endpoint in `ip:port` format; OTLP over gRPC is used as the exporter, and the default endpoint is `localhost:4317`.
# otlp_endpoint = "localhost:4317"
# The ratio of traces to sample and export. Valid range is `[0, 1]`: 1 means all traces are sampled, 0 means none; the default is 1. Ratios > 1 are treated as 1 and ratios < 0 as 0.
# tracing_sample_ratio = 1.0
|
||||||
|
|||||||
@@ -19,8 +19,13 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
|||||||
build-essential \
|
build-essential \
|
||||||
pkg-config \
|
pkg-config \
|
||||||
python3.10 \
|
python3.10 \
|
||||||
python3.10-dev \
|
python3.10-dev
|
||||||
python3-pip
|
|
||||||
|
# Remove Python 3.8 and install pip.
|
||||||
|
RUN apt-get -y purge python3.8 && \
|
||||||
|
apt-get -y autoremove && \
|
||||||
|
ln -s /usr/bin/python3.10 /usr/bin/python3 && \
|
||||||
|
curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
|
||||||
|
|
||||||
RUN git config --global --add safe.directory /greptimedb
|
RUN git config --global --add safe.directory /greptimedb
|
||||||
|
|
||||||
|
|||||||
47
docker/dev-builder/ubuntu/Dockerfile-18.10
Normal file
47
docker/dev-builder/ubuntu/Dockerfile-18.10
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
# Use the legacy glibc 2.28.
|
||||||
|
FROM ubuntu:18.10
|
||||||
|
|
||||||
|
ENV LANG en_US.utf8
|
||||||
|
WORKDIR /greptimedb
|
||||||
|
|
||||||
|
# Use old-releases.ubuntu.com to avoid 404s: https://help.ubuntu.com/community/EOLUpgrades.
|
||||||
|
RUN echo "deb http://old-releases.ubuntu.com/ubuntu/ cosmic main restricted universe multiverse\n\
|
||||||
|
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-updates main restricted universe multiverse\n\
|
||||||
|
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-security main restricted universe multiverse" > /etc/apt/sources.list
|
||||||
|
|
||||||
|
# Install dependencies.
|
||||||
|
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||||
|
libssl-dev \
|
||||||
|
tzdata \
|
||||||
|
curl \
|
||||||
|
ca-certificates \
|
||||||
|
git \
|
||||||
|
build-essential \
|
||||||
|
unzip \
|
||||||
|
pkg-config
|
||||||
|
|
||||||
|
# Install protoc.
|
||||||
|
ENV PROTOC_VERSION=25.1
|
||||||
|
RUN if [ "$(uname -m)" = "x86_64" ]; then \
|
||||||
|
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-x86_64.zip; \
|
||||||
|
elif [ "$(uname -m)" = "aarch64" ]; then \
|
||||||
|
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-aarch_64.zip; \
|
||||||
|
else \
|
||||||
|
echo "Unsupported architecture"; exit 1; \
|
||||||
|
fi && \
|
||||||
|
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/${PROTOC_ZIP} && \
|
||||||
|
unzip -o ${PROTOC_ZIP} -d /usr/local bin/protoc && \
|
||||||
|
unzip -o ${PROTOC_ZIP} -d /usr/local 'include/*' && \
|
||||||
|
rm -f ${PROTOC_ZIP}
|
||||||
|
|
||||||
|
# Install Rust.
|
||||||
|
SHELL ["/bin/bash", "-c"]
|
||||||
|
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||||
|
ENV PATH /root/.cargo/bin/:$PATH
|
||||||
|
|
||||||
|
# Install Rust toolchains.
|
||||||
|
ARG RUST_TOOLCHAIN
|
||||||
|
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||||
|
|
||||||
|
# Install nextest.
|
||||||
|
RUN cargo install cargo-nextest --locked
|
||||||
@@ -50,10 +50,10 @@ The concept "Table" in GreptimeDB is a bit "heavy" compared to other time-series
|
|||||||
```
|
```
|
||||||
|
|
||||||
The following parts will describe these implementation details:
|
The following parts will describe these implementation details:
|
||||||
- How to route these metric region tables and how those tables are distributed
|
- How to route these metric region tables and how those tables are distributed
|
||||||
- How to maintain the schema and other metadata of the underlying mito engine table
|
- How to maintain the schema and other metadata of the underlying mito engine table
|
||||||
- How to maintain the schema of metric engine table
|
- How to maintain the schema of metric engine table
|
||||||
- How the query goes
|
- How the query goes
|
||||||
|
|
||||||
## Routing
|
## Routing
|
||||||
|
|
||||||
|
|||||||
113
docs/rfcs/2023-11-03-inverted-index.md
Normal file
113
docs/rfcs/2023-11-03-inverted-index.md
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
---
|
||||||
|
Feature Name: Inverted Index for SST File
|
||||||
|
Tracking Issue: TBD
|
||||||
|
Date: 2023-11-03
|
||||||
|
Author: "Zhong Zhenchi <zhongzc_arch@outlook.com>"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
This RFC proposes a storage engine optimization: an inverted index that speeds up label-selection queries on metrics, with tag columns as the optimization target.
|
||||||
|
|
||||||
|
# Introduction
|
||||||
|
In the current system setup, in the Mito Engine, the first column of Primary Keys has a Min-Max index, which significantly optimizes the outcome. However, there are limitations when it comes to other columns, primarily tags. This RFC suggests the implementation of an inverted index to provide enhanced filtering benefits to bridge these limitations and improve overall system performance.
|
||||||
|
|
||||||
|
# Design Detail
|
||||||
|
|
||||||
|
## Inverted Index
|
||||||
|
|
||||||
|
The primary aim of the proposed inverted index is to optimize tag columns in the SST Parquet Files within the Mito Engine. The mapping and construction of an inverted index, from Tag Values to Row Groups, enables efficient logical structures that provide faster and more flexible queries.
|
||||||
|
|
||||||
|
When scanning SST files, pushed-down filters are applied to the corresponding tag's inverted index to determine the final row groups to be scanned, further improving the speed and efficiency of data retrieval.
|
||||||
|
|
||||||
|
## Index Format
|
||||||
|
|
||||||
|
The Inverted Index for each SST file follows the format shown below:
|
||||||
|
|
||||||
|
```
|
||||||
|
inverted_index₀ inverted_index₁ ... inverted_indexₙ footer
|
||||||
|
```
|
||||||
|
|
||||||
|
The structure inside each Inverted Index is as follows:
|
||||||
|
|
||||||
|
```
|
||||||
|
bitmap₀ bitmap₁ bitmap₂ ... bitmapₙ null_bitmap fst
|
||||||
|
```
|
||||||
|
|
||||||
|
The format is encapsulated by a footer:
|
||||||
|
|
||||||
|
```
|
||||||
|
footer_payload footer_payload_size
|
||||||
|
```
|
||||||
|
|
||||||
|
The `footer_payload` is presented in protobuf encoding of `InvertedIndexFooter`.
|
||||||
|
|
||||||
|
The complete format is containerized in [Puffin](https://iceberg.apache.org/puffin-spec/) with the type defined as `greptime-inverted-index-v1`.
|
||||||
|
|
||||||
|
## Protobuf Details
|
||||||
|
|
||||||
|
The `InvertedIndexFooter` is defined in the following protobuf structure:
|
||||||
|
|
||||||
|
```protobuf
|
||||||
|
// Field numbers are assigned sequentially here for validity; the exact numbering is illustrative.
message InvertedIndexFooter {
  repeated InvertedIndexMeta metas = 1;
}

message InvertedIndexMeta {
  string name = 1;
  uint64 row_count_in_group = 2;
  uint64 fst_offset = 3;
  uint64 fst_size = 4;
  uint64 null_bitmap_offset = 5;
  uint64 null_bitmap_size = 6;
  InvertedIndexStats stats = 7;
}

message InvertedIndexStats {
  uint64 null_count = 1;
  uint64 distinct_count = 2;
  bytes min_value = 3;
  bytes max_value = 4;
}
|
||||||
|
```
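As an illustration of how a reader could locate and decode the footer: the RFC does not pin down the width or endianness of `footer_payload_size`, so the sketch below assumes a 4-byte little-endian length and prost-generated Rust types for the messages above; the helper name is made up.

```rust
use prost::Message;

/// Illustrative only: decode the footer from the tail of an inverted-index blob,
/// assuming `footer_payload_size` is a 4-byte little-endian integer and
/// `InvertedIndexFooter` is the prost-generated type for the message above.
fn decode_footer(blob: &[u8]) -> Result<InvertedIndexFooter, prost::DecodeError> {
    let (rest, size_bytes) = blob.split_at(blob.len() - 4);
    let payload_size = u32::from_le_bytes(size_bytes.try_into().unwrap()) as usize;
    let payload = &rest[rest.len() - payload_size..];
    InvertedIndexFooter::decode(payload)
}
```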
|
||||||
|
|
||||||
|
## Bitmap
|
||||||
|
|
||||||
|
Bitmaps are used to represent indices of fixed-size groups. Rows are divided into groups of a fixed size, defined in the `InvertedIndexMeta` as `row_count_in_group`.
|
||||||
|
|
||||||
|
For example, when `row_count_in_group` is `4096`, it means each group has `4096` rows. If there are a total of `10000` rows, there will be `3` groups in total. The first two groups will have `4096` rows each, and the last group will have `1808` rows. If the indexed values are found in row `200` and `9000`, they will correspond to groups `0` and `2`, respectively. Therefore, the bitmap should show `0` and `2`.
|
||||||
|
|
||||||
|
Bitmap is implemented using [BitVec](https://docs.rs/bitvec/latest/bitvec/), selected due to its efficient representation of dense data arrays typical of indices of groups.
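A minimal sketch of the grouping rule described above, using the `bitvec` crate linked here (the helper name and signature are illustrative, not part of the proposed API):

```rust
use bitvec::prelude::*;

/// Mark the groups (each `row_count_in_group` rows) that contain the matching rows.
fn groups_bitmap(matching_rows: &[u64], total_rows: u64, row_count_in_group: u64) -> BitVec {
    let group_count = ((total_rows + row_count_in_group - 1) / row_count_in_group) as usize;
    let mut bitmap = bitvec![0; group_count];
    for &row in matching_rows {
        bitmap.set((row / row_count_in_group) as usize, true);
    }
    bitmap
}

// With the numbers from the example: groups_bitmap(&[200, 9000], 10_000, 4096)
// produces 3 groups with bits 0 and 2 set.
```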
|
||||||
|
|
||||||
|
|
||||||
|
## Finite State Transducer (FST)
|
||||||
|
|
||||||
|
[FST](https://docs.rs/fst/latest/fst/) is a highly efficient data structure ideal for in-memory indexing. It represents ordered sets or maps where the keys are bytes. The choice of the FST effectively balances the need for performance, space efficiency, and the ability to perform complex analyses such as regular expression matching.
|
||||||
|
|
||||||
|
The conventional usage of FST and `u64` values has been adapted to facilitate indirect indexing to row groups. As the row groups are represented as Bitmaps, we utilize the `u64` values split into bitmap's offset (higher 32 bits) and size (lower 32 bits) to represent the location of these Bitmaps.
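For clarity, the packing described here amounts to the following (helper names are illustrative):

```rust
/// Pack a bitmap's offset and size into the u64 stored as the FST value:
/// offset in the high 32 bits, size in the low 32 bits.
fn pack_bitmap_location(offset: u32, size: u32) -> u64 {
    (u64::from(offset) << 32) | u64::from(size)
}

/// Unpack an FST value back into (offset, size).
fn unpack_bitmap_location(value: u64) -> (u32, u32) {
    ((value >> 32) as u32, value as u32)
}
```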
|
||||||
|
|
||||||
|
## API Design
|
||||||
|
|
||||||
|
Two APIs `InvertedIndexBuilder` for building indexes and `InvertedIndexSearcher` for querying indexes are designed:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
type Bytes = Vec<u8>;
type GroupId = u64;

trait InvertedIndexBuilder {
    fn add(&mut self, name: &str, value: Option<&Bytes>, group_id: GroupId) -> Result<()>;
    fn finish(&mut self) -> Result<()>;
}

enum Predicate {
    Gt(Bytes),
    GtEq(Bytes),
    Lt(Bytes),
    LtEq(Bytes),
    InList(Vec<Bytes>),
    RegexMatch(String),
}

trait InvertedIndexSearcher {
    fn search(&mut self, name: &str, predicates: &[Predicate]) -> Result<impl IntoIterator<Item = GroupId>>;
}
|
||||||
|
```
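A hypothetical call pattern for these two traits follows; concrete implementations are left open by this RFC, and the tag name and byte values are made up for illustration.

```rust
fn example(
    mut builder: impl InvertedIndexBuilder,
    mut searcher: impl InvertedIndexSearcher,
) -> Result<()> {
    // Index a present value and a null value for the "host" tag.
    builder.add("host", Some(&b"web-01".to_vec()), 0)?;
    builder.add("host", None, 1)?; // null values go to the null bitmap
    builder.finish()?;

    // Query: which row groups may contain host == "web-01"?
    let groups = searcher.search("host", &[Predicate::InList(vec![b"web-01".to_vec()])])?;
    for group_id in groups {
        // Only these row groups need to be read from the SST file.
        let _ = group_id;
    }
    Ok(())
}
```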
|
||||||
169
docs/rfcs/2023-11-07-region-migration.md
Normal file
169
docs/rfcs/2023-11-07-region-migration.md
Normal file
@@ -0,0 +1,169 @@
|
|||||||
|
---
|
||||||
|
Feature Name: Region Migration Procedure
|
||||||
|
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/2700
|
||||||
|
Date: 2023-11-03
|
||||||
|
Author: "Xu Wenkang <wenymedia@gmail.com>"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
This RFC proposes a mechanism that gives the Meta Server the ability to move regions between Datanodes.
|
||||||
|
|
||||||
|
# Motivation
|
||||||
|
Typically, we need this ability in the following scenarios:
|
||||||
|
- Migrate hot-spot Regions to an idle Datanode
|
||||||
|
- Move failed Regions to an available Datanode
|
||||||
|
|
||||||
|
# Details
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart TD
|
||||||
|
style Start fill:#85CB90,color:#fff
|
||||||
|
style End fill:#85CB90,color:#fff
|
||||||
|
style SelectCandidate fill:#F38488,color:#fff
|
||||||
|
style OpenCandidate fill:#F38488,color:#fff
|
||||||
|
style UpdateMetadataDown fill:#F38488,color:#fff
|
||||||
|
style UpdateMetadataUp fill:#F38488,color:#fff
|
||||||
|
style UpdateMetadataRollback fill:#F38488,color:#fff
|
||||||
|
style DowngradeLeader fill:#F38488,color:#fff
|
||||||
|
style UpgradeCandidate fill:#F38488,color:#fff
|
||||||
|
|
||||||
|
Start[Start]
|
||||||
|
SelectCandidate[Select Candidate]
|
||||||
|
UpdateMetadataDown["`Update Metadata(Down)
|
||||||
|
1. Downgrade Leader
|
||||||
|
`"]
|
||||||
|
DowngradeLeader["`Downgrade Leader
|
||||||
|
1. Become Follower
|
||||||
|
2. Return **last_entry_id**
|
||||||
|
`"]
|
||||||
|
UpgradeCandidate["`Upgrade Candidate
|
||||||
|
1. Replay to **last_entry_id**
|
||||||
|
2. Become Leader
|
||||||
|
`"]
|
||||||
|
UpdateMetadataUp["`Update Metadata(Up)
|
||||||
|
1. Switch Leader
|
||||||
|
2.1. Remove Old Leader(Opt.)
|
||||||
|
2.2. Move Old Leader to Follower(Opt.)
|
||||||
|
`"]
|
||||||
|
UpdateMetadataRollback["`Update Metadata(Rollback)
|
||||||
|
1. Upgrade old Leader
|
||||||
|
`"]
|
||||||
|
End
|
||||||
|
AnyCandidate{Available?}
|
||||||
|
OpenCandidate["Open Candidate"]
|
||||||
|
CloseOldLeader["Close Old Leader"]
|
||||||
|
|
||||||
|
Start
|
||||||
|
--> SelectCandidate
|
||||||
|
--> AnyCandidate
|
||||||
|
--> |Yes| UpdateMetadataDown
|
||||||
|
--> I1["Invalidate Frontend Cache"]
|
||||||
|
--> DowngradeLeader
|
||||||
|
--> UpgradeCandidate
|
||||||
|
--> UpdateMetadataUp
|
||||||
|
--> I2["Invalidate Frontend Cache"]
|
||||||
|
--> End
|
||||||
|
|
||||||
|
UpgradeCandidate
|
||||||
|
--> UpdateMetadataRollback
|
||||||
|
--> I3["Invalidate Frontend Cache"]
|
||||||
|
--> End
|
||||||
|
|
||||||
|
I2
|
||||||
|
--> CloseOldLeader
|
||||||
|
--> End
|
||||||
|
|
||||||
|
AnyCandidate
|
||||||
|
--> |No| OpenCandidate
|
||||||
|
--> UpdateMetadataDown
|
||||||
|
```
|
||||||
|
|
||||||
|
**Only the red nodes persist state after they have succeeded**; the other nodes (excluding the Start and End nodes) do not persist state.
|
||||||
|
|
||||||
|
## Steps
|
||||||
|
|
||||||
|
**The persistent context:** It is shared across steps and remains available after recovery. It is only updated/stored after a red node has succeeded.
|
||||||
|
|
||||||
|
Values:
|
||||||
|
- `region_id`: The target leader region.
|
||||||
|
- `peer`: The target datanode.
|
||||||
|
- `close_old_leader`: Indicates whether to close the old leader region.
|
||||||
|
- `leader_may_unreachable`: It's used to support the failover procedure.
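To make the shape of this context concrete, here is an illustrative sketch; the `RegionId` and peer-id aliases stand in for whatever types the implementation actually uses, and the serde derives are only an assumption about how the context would be persisted.

```rust
use serde::{Deserialize, Serialize};

// Stand-ins for the store's real types (assumptions for this sketch).
type RegionId = u64;
type DatanodePeerId = u64;

/// Sketch of the persistent context; it is updated/stored only after a red node succeeds.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct PersistentContext {
    /// The target leader region.
    region_id: RegionId,
    /// The target datanode.
    peer: DatanodePeerId,
    /// Whether to close the old leader region.
    close_old_leader: bool,
    /// Used to support the failover procedure.
    leader_may_unreachable: bool,
}
```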
|
||||||
|
|
||||||
|
**The Volatile context:** It's shared in each step and available in executing (including retrying). It will be dropped if the procedure runner crashes.
|
||||||
|
|
||||||
|
### Select Candidate
|
||||||
|
|
||||||
|
The Persistent state: Selected Candidate Region.
|
||||||
|
|
||||||
|
### Update Metadata(Down)
|
||||||
|
|
||||||
|
**The Persistent context:**
|
||||||
|
- The (latest/updated) `version` of `TableRouteValue`; it will be used in the `Update Metadata(Up)` step.
|
||||||
|
|
||||||
|
### Downgrade Leader
|
||||||
|
This step sends an instruction via heartbeat and performs:
|
||||||
|
1. Downgrades leader region.
|
||||||
|
2. Retrieves the `last_entry_id` (if available).
|
||||||
|
|
||||||
|
If the target leader region is not found:
|
||||||
|
- Sets `close_old_leader` to true.
|
||||||
|
- Sets `leader_may_unreachable` to true.
|
||||||
|
|
||||||
|
If the target Datanode is unreachable:
|
||||||
|
- Waits for the region lease to expire.
|
||||||
|
- Sets `close_old_leader` to true.
|
||||||
|
- Sets `leader_may_unreachable` to true.
|
||||||
|
|
||||||
|
**The Persistent context:**
|
||||||
|
None
|
||||||
|
|
||||||
|
**The Persistent state:**
|
||||||
|
- `last_entry_id`
|
||||||
|
|
||||||
|
*Passed to the next step.
|
||||||
|
|
||||||
|
|
||||||
|
### Upgrade Candidate
|
||||||
|
This step sends an instruction via heartbeat and performs:
|
||||||
|
1. Replays the WAL to latest(`last_entry_id`).
|
||||||
|
2. Upgrades the candidate region.
|
||||||
|
|
||||||
|
If the target region is not found:
|
||||||
|
- Rolls back.
|
||||||
|
- Notifies the failover detector if `leader_may_unreachable` == true.
|
||||||
|
- Exits procedure.
|
||||||
|
|
||||||
|
If the target Datanode is unreachable:
|
||||||
|
- Rolls back.
|
||||||
|
- Notifies the failover detector if `leader_may_unreachable` == true.
|
||||||
|
- Exits procedure.
|
||||||
|
|
||||||
|
**The Persistent context:**
|
||||||
|
None
|
||||||
|
|
||||||
|
### Update Metadata(Up)
|
||||||
|
This step performs
|
||||||
|
1. Switches Leader.
|
||||||
|
2. Removes Old Leader(Opt.).
|
||||||
|
3. Moves Old Leader to follower(Opt.).
|
||||||
|
|
||||||
|
The `TableRouteValue` version should equal the `version` recorded in the persistent context. Otherwise, verify whether the `TableRouteValue` has already been updated.
|
||||||
|
|
||||||
|
**The Persistent context:**
|
||||||
|
None
|
||||||
|
|
||||||
|
### Close Old Leader(Opt.)
|
||||||
|
This step sends a close region instruction via heartbeat.
|
||||||
|
|
||||||
|
If the target leader region is not found:
|
||||||
|
- Ignore.
|
||||||
|
|
||||||
|
If the target Datanode is unreachable:
|
||||||
|
- Ignore.
|
||||||
|
|
||||||
|
### Open Candidate(Opt.)
|
||||||
|
This step sends an open region instruction via heartbeat and waits for conditions to be met (typically, the condition is that the `last_entry_id` of the Candidate Region is very close to that of the Leader Region or the latest).
|
||||||
|
|
||||||
|
If the target Datanode is unreachable:
|
||||||
|
- Exits procedure.
|
||||||
24
licenserc.toml
Normal file
24
licenserc.toml
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
# Copyright 2023 Greptime Team
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
headerPath = "Apache-2.0.txt"
|
||||||
|
|
||||||
|
includes = [
|
||||||
|
"*.rs",
|
||||||
|
"*.py",
|
||||||
|
]
|
||||||
|
|
||||||
|
[properties]
|
||||||
|
inceptionYear = 2023
|
||||||
|
copyrightOwner = "Greptime Team"
|
||||||
@@ -1,2 +1,2 @@
|
|||||||
[toolchain]
|
[toolchain]
|
||||||
channel = "nightly-2023-08-07"
|
channel = "nightly-2023-10-21"
|
||||||
|
|||||||
157
scripts/run-pyo3-greptime.sh
Executable file
157
scripts/run-pyo3-greptime.sh
Executable file
@@ -0,0 +1,157 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# This script configures the environment to run 'greptime' with the required Python version
|
||||||
|
|
||||||
|
# This script should be compatible both in Linux and macOS
|
||||||
|
OS_TYPE="$(uname)"
|
||||||
|
readonly OS_TYPE
|
||||||
|
|
||||||
|
check_command_existence() {
|
||||||
|
command -v "$1" &> /dev/null
|
||||||
|
}
|
||||||
|
|
||||||
|
get_python_version() {
|
||||||
|
case "$OS_TYPE" in
|
||||||
|
Darwin)
|
||||||
|
otool -L $GREPTIME_BIN_PATH | grep -o 'Python.framework/Versions/3.[0-9]\+/Python' | grep -o '3.[0-9]\+'
|
||||||
|
;;
|
||||||
|
Linux)
|
||||||
|
ldd $GREPTIME_BIN_PATH | grep -o 'libpython3\.[0-9]\+' | grep -o '3\.[0-9]\+'
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "Unsupported OS type: $OS_TYPE"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
}
|
||||||
|
|
||||||
|
setup_virtualenv() {
|
||||||
|
local req_py_version="$1"
|
||||||
|
local env_name="GreptimeTmpVenv$req_py_version"
|
||||||
|
virtualenv --python=python"$req_py_version" "$env_name"
|
||||||
|
source "$env_name/bin/activate"
|
||||||
|
}
|
||||||
|
|
||||||
|
setup_conda_env() {
|
||||||
|
local req_py_version="$1"
|
||||||
|
local conda_base
|
||||||
|
conda_base=$(conda info --base) || { echo "Error obtaining conda base directory"; exit 1; }
|
||||||
|
. "$conda_base/etc/profile.d/conda.sh"
|
||||||
|
|
||||||
|
if ! conda list --name "GreptimeTmpPyO3Env$req_py_version" &> /dev/null; then
|
||||||
|
conda create --yes --name "GreptimeTmpPyO3Env$req_py_version" python="$req_py_version"
|
||||||
|
fi
|
||||||
|
|
||||||
|
conda activate "GreptimeTmpPyO3Env$req_py_version"
|
||||||
|
}
|
||||||
|
|
||||||
|
GREPTIME_BIN_PATH="./greptime"
|
||||||
|
YES="false"
|
||||||
|
|
||||||
|
usage() {
|
||||||
|
echo "Usage:"
|
||||||
|
echo " $0 -f <greptime-bin-path> [-y] <args-pass-to-greptime>"
|
||||||
|
echo "Set $PY_ENV_MAN to 1 to use virtualenv, 2 to use conda"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
function parse_args() {
|
||||||
|
while getopts ":f:y" opt; do
|
||||||
|
case $opt in
|
||||||
|
f)
|
||||||
|
GREPTIME_BIN_PATH=$OPTARG
|
||||||
|
;;
|
||||||
|
y)
|
||||||
|
YES="yes"
|
||||||
|
;;
|
||||||
|
\?)
|
||||||
|
echo "Invalid option: -$OPTARG" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
:)
|
||||||
|
echo "Option -$OPTARG requires an argument." >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
shift $((OPTIND -1))
|
||||||
|
|
||||||
|
REST_ARGS=$*
|
||||||
|
|
||||||
|
if [ -z "$GREPTIME_BIN_PATH" ]; then
|
||||||
|
usage
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Run greptime binary at '$GREPTIME_BIN_PATH' (yes=$YES)..."
|
||||||
|
echo "The args pass to greptime: '$REST_ARGS'"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Set library path and pass all arguments to greptime to run it
|
||||||
|
execute_greptime() {
|
||||||
|
if [[ "$OS_TYPE" == "Darwin" ]]; then
|
||||||
|
DYLD_LIBRARY_PATH="${CONDA_PREFIX:-$PREFIX}/lib:${LD_LIBRARY_PATH:-}" $GREPTIME_BIN_PATH $@
|
||||||
|
elif [[ "$OS_TYPE" == "Linux" ]]; then
|
||||||
|
LD_LIBRARY_PATH="${CONDA_PREFIX:-$PREFIX}/lib:${LD_LIBRARY_PATH:-}" $GREPTIME_BIN_PATH $@
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
main() {
|
||||||
|
parse_args $@
|
||||||
|
|
||||||
|
local req_py_version
|
||||||
|
req_py_version=$(get_python_version)
|
||||||
|
readonly req_py_version
|
||||||
|
|
||||||
|
if [[ -z "$req_py_version" ]]; then
|
||||||
|
if $GREPTIME_BIN_PATH --version &> /dev/null; then
|
||||||
|
$GREPTIME_BIN_PATH $REST_ARGS
|
||||||
|
else
|
||||||
|
echo "The 'greptime' binary is not valid or encountered an error."
|
||||||
|
$GREPTIME_BIN_PATH --version
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "The required version of Python shared library is $req_py_version"
|
||||||
|
|
||||||
|
# If YES is already set (via -y), use it as the answer; otherwise read from stdin
|
||||||
|
if [[ -z "$YES" ]]; then
|
||||||
|
echo "Now this script will try to install or find correct Python Version"
|
||||||
|
echo "Do you want to continue? (yes/no): "
|
||||||
|
read -r yn
|
||||||
|
else
|
||||||
|
yn="$YES"
|
||||||
|
fi
|
||||||
|
case $yn in
|
||||||
|
[Yy]* ) ;;
|
||||||
|
[Nn]* ) exit;;
|
||||||
|
* ) echo "Please answer yes or no.";;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# If PY_ENV_MAN is set, use it as the choice;
|
||||||
|
# otherwise read from stdin
|
||||||
|
if [[ -z "$PY_ENV_MAN" ]]; then
|
||||||
|
echo "Do you want to use virtualenv or conda? (virtualenv(1)/conda(2)): "
|
||||||
|
read -r option
|
||||||
|
else
|
||||||
|
option="$PY_ENV_MAN"
|
||||||
|
fi
|
||||||
|
|
||||||
|
case $option in
|
||||||
|
1)
|
||||||
|
setup_virtualenv "$req_py_version"
|
||||||
|
;;
|
||||||
|
2)
|
||||||
|
setup_conda_env "$req_py_version"
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "Please input 1 or 2"; exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
execute_greptime $REST_ARGS
|
||||||
|
}
|
||||||
|
|
||||||
|
main "$@"
|
||||||
@@ -5,14 +5,16 @@ edition.workspace = true
|
|||||||
license.workspace = true
|
license.workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
common-base = { workspace = true }
|
common-base.workspace = true
|
||||||
common-error = { workspace = true }
|
common-decimal.workspace = true
|
||||||
common-macro = { workspace = true }
|
common-error.workspace = true
|
||||||
common-time = { workspace = true }
|
common-macro.workspace = true
|
||||||
datatypes = { workspace = true }
|
common-time.workspace = true
|
||||||
|
datatypes.workspace = true
|
||||||
greptime-proto.workspace = true
|
greptime-proto.workspace = true
|
||||||
|
paste = "1.0"
|
||||||
prost.workspace = true
|
prost.workspace = true
|
||||||
snafu = { version = "0.7", features = ["backtraces"] }
|
snafu.workspace = true
|
||||||
tonic.workspace = true
|
tonic.workspace = true
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
|
|||||||
@@ -28,7 +28,12 @@ pub type Result<T> = std::result::Result<T, Error>;
|
|||||||
#[stack_trace_debug]
|
#[stack_trace_debug]
|
||||||
pub enum Error {
|
pub enum Error {
|
||||||
#[snafu(display("Unknown proto column datatype: {}", datatype))]
|
#[snafu(display("Unknown proto column datatype: {}", datatype))]
|
||||||
UnknownColumnDataType { datatype: i32, location: Location },
|
UnknownColumnDataType {
|
||||||
|
datatype: i32,
|
||||||
|
location: Location,
|
||||||
|
#[snafu(source)]
|
||||||
|
error: prost::DecodeError,
|
||||||
|
},
|
||||||
|
|
||||||
#[snafu(display("Failed to create column datatype from {:?}", from))]
|
#[snafu(display("Failed to create column datatype from {:?}", from))]
|
||||||
IntoColumnDataType {
|
IntoColumnDataType {
|
||||||
|
|||||||
@@ -15,6 +15,8 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use common_base::BitVec;
|
use common_base::BitVec;
|
||||||
|
use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION};
|
||||||
|
use common_decimal::Decimal128;
|
||||||
use common_time::interval::IntervalUnit;
|
use common_time::interval::IntervalUnit;
|
||||||
use common_time::time::Time;
|
use common_time::time::Time;
|
||||||
use common_time::timestamp::TimeUnit;
|
use common_time::timestamp::TimeUnit;
|
||||||
@@ -26,47 +28,71 @@ use datatypes::types::{
|
|||||||
};
|
};
|
||||||
use datatypes::value::{OrderedF32, OrderedF64, Value};
|
use datatypes::value::{OrderedF32, OrderedF64, Value};
|
||||||
use datatypes::vectors::{
|
use datatypes::vectors::{
|
||||||
BinaryVector, BooleanVector, DateTimeVector, DateVector, DurationMicrosecondVector,
|
BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector,
|
||||||
DurationMillisecondVector, DurationNanosecondVector, DurationSecondVector, Float32Vector,
|
DurationMicrosecondVector, DurationMillisecondVector, DurationNanosecondVector,
|
||||||
Float64Vector, Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
|
DurationSecondVector, Float32Vector, Float64Vector, Int32Vector, Int64Vector,
|
||||||
IntervalYearMonthVector, PrimitiveVector, StringVector, TimeMicrosecondVector,
|
IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector, PrimitiveVector,
|
||||||
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
|
StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector,
|
||||||
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
|
TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector,
|
||||||
UInt64Vector, VectorRef,
|
TimestampNanosecondVector, TimestampSecondVector, UInt32Vector, UInt64Vector, VectorRef,
|
||||||
};
|
};
|
||||||
|
use greptime_proto::v1;
|
||||||
|
use greptime_proto::v1::column_data_type_extension::TypeExt;
|
||||||
use greptime_proto::v1::ddl_request::Expr;
|
use greptime_proto::v1::ddl_request::Expr;
|
||||||
use greptime_proto::v1::greptime_request::Request;
|
use greptime_proto::v1::greptime_request::Request;
|
||||||
use greptime_proto::v1::query_request::Query;
|
use greptime_proto::v1::query_request::Query;
|
||||||
use greptime_proto::v1::value::ValueData;
|
use greptime_proto::v1::value::ValueData;
|
||||||
use greptime_proto::v1::{self, DdlRequest, IntervalMonthDayNano, QueryRequest, Row, SemanticType};
|
use greptime_proto::v1::{
|
||||||
|
ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, QueryRequest, Row, SemanticType,
|
||||||
|
};
|
||||||
|
use paste::paste;
|
||||||
use snafu::prelude::*;
|
use snafu::prelude::*;
|
||||||
|
|
||||||
use crate::error::{self, Result};
|
use crate::error::{self, Result};
|
||||||
use crate::v1::column::Values;
|
use crate::v1::column::Values;
|
||||||
use crate::v1::{Column, ColumnDataType, Value as GrpcValue};
|
use crate::v1::{Column, ColumnDataType, Value as GrpcValue};
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Eq)]
|
/// ColumnDataTypeWrapper is a wrapper of ColumnDataType and ColumnDataTypeExtension.
|
||||||
pub struct ColumnDataTypeWrapper(ColumnDataType);
|
/// It could be used to convert with ConcreteDataType.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub struct ColumnDataTypeWrapper {
|
||||||
|
datatype: ColumnDataType,
|
||||||
|
datatype_ext: Option<ColumnDataTypeExtension>,
|
||||||
|
}
|
||||||
|
|
||||||
impl ColumnDataTypeWrapper {
|
impl ColumnDataTypeWrapper {
|
||||||
pub fn try_new(datatype: i32) -> Result<Self> {
|
/// Try to create a ColumnDataTypeWrapper from i32(ColumnDataType) and ColumnDataTypeExtension.
|
||||||
let datatype = ColumnDataType::from_i32(datatype)
|
pub fn try_new(datatype: i32, datatype_ext: Option<ColumnDataTypeExtension>) -> Result<Self> {
|
||||||
|
let datatype = ColumnDataType::try_from(datatype)
|
||||||
.context(error::UnknownColumnDataTypeSnafu { datatype })?;
|
.context(error::UnknownColumnDataTypeSnafu { datatype })?;
|
||||||
Ok(Self(datatype))
|
Ok(Self {
|
||||||
|
datatype,
|
||||||
|
datatype_ext,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn new(datatype: ColumnDataType) -> Self {
|
/// Create a ColumnDataTypeWrapper from ColumnDataType and ColumnDataTypeExtension.
|
||||||
Self(datatype)
|
pub fn new(datatype: ColumnDataType, datatype_ext: Option<ColumnDataTypeExtension>) -> Self {
|
||||||
|
Self {
|
||||||
|
datatype,
|
||||||
|
datatype_ext,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Get the ColumnDataType.
|
||||||
pub fn datatype(&self) -> ColumnDataType {
|
pub fn datatype(&self) -> ColumnDataType {
|
||||||
self.0
|
self.datatype
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get a tuple of ColumnDataType and ColumnDataTypeExtension.
|
||||||
|
pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
|
||||||
|
(self.datatype, self.datatype_ext.clone())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
||||||
fn from(datatype: ColumnDataTypeWrapper) -> Self {
|
fn from(datatype_wrapper: ColumnDataTypeWrapper) -> Self {
|
||||||
match datatype.0 {
|
match datatype_wrapper.datatype {
|
||||||
ColumnDataType::Boolean => ConcreteDataType::boolean_datatype(),
|
ColumnDataType::Boolean => ConcreteDataType::boolean_datatype(),
|
||||||
ColumnDataType::Int8 => ConcreteDataType::int8_datatype(),
|
ColumnDataType::Int8 => ConcreteDataType::int8_datatype(),
|
||||||
ColumnDataType::Int16 => ConcreteDataType::int16_datatype(),
|
ColumnDataType::Int16 => ConcreteDataType::int16_datatype(),
|
||||||
@@ -109,6 +135,100 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
|||||||
ConcreteDataType::duration_microsecond_datatype()
|
ConcreteDataType::duration_microsecond_datatype()
|
||||||
}
|
}
|
||||||
ColumnDataType::DurationNanosecond => ConcreteDataType::duration_nanosecond_datatype(),
|
ColumnDataType::DurationNanosecond => ConcreteDataType::duration_nanosecond_datatype(),
|
||||||
|
ColumnDataType::Decimal128 => {
|
||||||
|
if let Some(TypeExt::DecimalType(d)) = datatype_wrapper
|
||||||
|
.datatype_ext
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|datatype_ext| datatype_ext.type_ext.as_ref())
|
||||||
|
{
|
||||||
|
ConcreteDataType::decimal128_datatype(d.precision as u8, d.scale as i8)
|
||||||
|
} else {
|
||||||
|
ConcreteDataType::decimal128_default_datatype()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// This macro is used to generate datatype functions
|
||||||
|
/// with lower style for ColumnDataTypeWrapper.
|
||||||
|
///
|
||||||
|
///
|
||||||
|
/// For example: we can use `ColumnDataTypeWrapper::int8_datatype()`,
|
||||||
|
/// to get a ColumnDataTypeWrapper with datatype `ColumnDataType::Int8`.
|
||||||
|
macro_rules! impl_column_type_functions {
|
||||||
|
($($Type: ident), +) => {
|
||||||
|
paste! {
|
||||||
|
impl ColumnDataTypeWrapper {
|
||||||
|
$(
|
||||||
|
pub fn [<$Type:lower _datatype>]() -> ColumnDataTypeWrapper {
|
||||||
|
ColumnDataTypeWrapper {
|
||||||
|
datatype: ColumnDataType::$Type,
|
||||||
|
datatype_ext: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)+
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// This macro is used to generate datatype functions
|
||||||
|
/// with snake style for ColumnDataTypeWrapper.
|
||||||
|
///
|
||||||
|
///
|
||||||
|
/// For example: we can use `ColumnDataTypeWrapper::duration_second_datatype()`,
|
||||||
|
/// to get a ColumnDataTypeWrapper with datatype `ColumnDataType::DurationSecond`.
|
||||||
|
macro_rules! impl_column_type_functions_with_snake {
|
||||||
|
($($TypeName: ident), +) => {
|
||||||
|
paste!{
|
||||||
|
impl ColumnDataTypeWrapper {
|
||||||
|
$(
|
||||||
|
pub fn [<$TypeName:snake _datatype>]() -> ColumnDataTypeWrapper {
|
||||||
|
ColumnDataTypeWrapper {
|
||||||
|
datatype: ColumnDataType::$TypeName,
|
||||||
|
datatype_ext: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)+
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
impl_column_type_functions!(
|
||||||
|
Boolean, Uint8, Uint16, Uint32, Uint64, Int8, Int16, Int32, Int64, Float32, Float64, Binary,
|
||||||
|
Date, Datetime, String
|
||||||
|
);
|
||||||
|
|
||||||
|
impl_column_type_functions_with_snake!(
|
||||||
|
TimestampSecond,
|
||||||
|
TimestampMillisecond,
|
||||||
|
TimestampMicrosecond,
|
||||||
|
TimestampNanosecond,
|
||||||
|
TimeSecond,
|
||||||
|
TimeMillisecond,
|
||||||
|
TimeMicrosecond,
|
||||||
|
TimeNanosecond,
|
||||||
|
IntervalYearMonth,
|
||||||
|
IntervalDayTime,
|
||||||
|
IntervalMonthDayNano,
|
||||||
|
DurationSecond,
|
||||||
|
DurationMillisecond,
|
||||||
|
DurationMicrosecond,
|
||||||
|
DurationNanosecond
|
||||||
|
);
|
||||||
|
|
||||||
|
impl ColumnDataTypeWrapper {
|
||||||
|
pub fn decimal128_datatype(precision: i32, scale: i32) -> Self {
|
||||||
|
ColumnDataTypeWrapper {
|
||||||
|
datatype: ColumnDataType::Decimal128,
|
||||||
|
datatype_ext: Some(ColumnDataTypeExtension {
|
||||||
|
type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension {
|
||||||
|
precision,
|
||||||
|
scale,
|
||||||
|
})),
|
||||||
|
}),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -117,7 +237,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
|||||||
type Error = error::Error;
|
type Error = error::Error;
|
||||||
|
|
||||||
fn try_from(datatype: ConcreteDataType) -> Result<Self> {
|
fn try_from(datatype: ConcreteDataType) -> Result<Self> {
|
||||||
let datatype = ColumnDataTypeWrapper(match datatype {
|
let column_datatype = match datatype {
|
||||||
ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
|
ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
|
||||||
ConcreteDataType::Int8(_) => ColumnDataType::Int8,
|
ConcreteDataType::Int8(_) => ColumnDataType::Int8,
|
||||||
ConcreteDataType::Int16(_) => ColumnDataType::Int16,
|
ConcreteDataType::Int16(_) => ColumnDataType::Int16,
|
||||||
@@ -156,13 +276,30 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
|||||||
DurationType::Microsecond(_) => ColumnDataType::DurationMicrosecond,
|
DurationType::Microsecond(_) => ColumnDataType::DurationMicrosecond,
|
||||||
DurationType::Nanosecond(_) => ColumnDataType::DurationNanosecond,
|
DurationType::Nanosecond(_) => ColumnDataType::DurationNanosecond,
|
||||||
},
|
},
|
||||||
|
ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128,
|
||||||
ConcreteDataType::Null(_)
|
ConcreteDataType::Null(_)
|
||||||
| ConcreteDataType::List(_)
|
| ConcreteDataType::List(_)
|
||||||
| ConcreteDataType::Dictionary(_) => {
|
| ConcreteDataType::Dictionary(_) => {
|
||||||
return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
|
return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
|
||||||
}
|
}
|
||||||
});
|
};
|
||||||
Ok(datatype)
|
let datatype_extension = match column_datatype {
|
||||||
|
ColumnDataType::Decimal128 => {
|
||||||
|
datatype
|
||||||
|
.as_decimal128()
|
||||||
|
.map(|decimal_type| ColumnDataTypeExtension {
|
||||||
|
type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension {
|
||||||
|
precision: decimal_type.precision() as i32,
|
||||||
|
scale: decimal_type.scale() as i32,
|
||||||
|
})),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
_ => None,
|
||||||
|
};
|
||||||
|
Ok(Self {
|
||||||
|
datatype: column_datatype,
|
||||||
|
datatype_ext: datatype_extension,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -288,6 +425,10 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
|
|||||||
duration_nanosecond_values: Vec::with_capacity(capacity),
|
duration_nanosecond_values: Vec::with_capacity(capacity),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
},
|
},
|
||||||
|
ColumnDataType::Decimal128 => Values {
|
||||||
|
decimal128_values: Vec::with_capacity(capacity),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -341,6 +482,7 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
|
|||||||
TimeUnit::Microsecond => values.duration_microsecond_values.push(val.value()),
|
TimeUnit::Microsecond => values.duration_microsecond_values.push(val.value()),
|
||||||
TimeUnit::Nanosecond => values.duration_nanosecond_values.push(val.value()),
|
TimeUnit::Nanosecond => values.duration_nanosecond_values.push(val.value()),
|
||||||
},
|
},
|
||||||
|
Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)),
|
||||||
Value::List(_) => unreachable!(),
|
Value::List(_) => unreachable!(),
|
||||||
});
|
});
|
||||||
column.null_mask = null_mask.into_vec();
|
column.null_mask = null_mask.into_vec();
|
||||||
@@ -381,17 +523,29 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Converts an i128 value to google protobuf type [IntervalMonthDayNano].
|
/// Converts an i128 value to google protobuf type [IntervalMonthDayNano].
|
||||||
pub fn convert_i128_to_interval(v: i128) -> IntervalMonthDayNano {
|
pub fn convert_i128_to_interval(v: i128) -> v1::IntervalMonthDayNano {
|
||||||
let interval = Interval::from_i128(v);
|
let interval = Interval::from_i128(v);
|
||||||
let (months, days, nanoseconds) = interval.to_month_day_nano();
|
let (months, days, nanoseconds) = interval.to_month_day_nano();
|
||||||
IntervalMonthDayNano {
|
v1::IntervalMonthDayNano {
|
||||||
months,
|
months,
|
||||||
days,
|
days,
|
||||||
nanoseconds,
|
nanoseconds,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn pb_value_to_value_ref(value: &v1::Value) -> ValueRef {
|
/// Convert common decimal128 to grpc decimal128 without precision and scale.
|
||||||
|
pub fn convert_to_pb_decimal128(v: Decimal128) -> v1::Decimal128 {
|
||||||
|
let value = v.val();
|
||||||
|
v1::Decimal128 {
|
||||||
|
hi: (value >> 64) as i64,
|
||||||
|
lo: value as i64,
|
||||||
|
}
|
||||||
|
}
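As a note on the `hi`/`lo` encoding above: it is the usual split of an `i128` into two 64-bit halves. A minimal round-trip sketch (helper names are illustrative, not part of this change):

```rust
fn split_i128(value: i128) -> (i64, i64) {
    // High 64 bits keep the sign; low 64 bits are truncated as-is.
    ((value >> 64) as i64, value as i64)
}

fn join_i128(hi: i64, lo: i64) -> i128 {
    // Reinterpret `lo` as unsigned so its bits are not sign-extended.
    ((hi as i128) << 64) | (lo as u64 as i128)
}

// join_i128 applied to the halves from split_i128(v) returns v for every i128 value v.
```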
|
||||||
|
|
||||||
|
pub fn pb_value_to_value_ref<'a>(
|
||||||
|
value: &'a v1::Value,
|
||||||
|
datatype_ext: &'a Option<ColumnDataTypeExtension>,
|
||||||
|
) -> ValueRef<'a> {
|
||||||
let Some(value) = &value.value_data else {
|
let Some(value) = &value.value_data else {
|
||||||
return ValueRef::Null;
|
return ValueRef::Null;
|
||||||
};
|
};
|
||||||
@@ -436,6 +590,28 @@ pub fn pb_value_to_value_ref(value: &v1::Value) -> ValueRef {
|
|||||||
ValueData::DurationMillisecondValue(v) => ValueRef::Duration(Duration::new_millisecond(*v)),
|
ValueData::DurationMillisecondValue(v) => ValueRef::Duration(Duration::new_millisecond(*v)),
|
||||||
ValueData::DurationMicrosecondValue(v) => ValueRef::Duration(Duration::new_microsecond(*v)),
|
ValueData::DurationMicrosecondValue(v) => ValueRef::Duration(Duration::new_microsecond(*v)),
|
||||||
ValueData::DurationNanosecondValue(v) => ValueRef::Duration(Duration::new_nanosecond(*v)),
|
ValueData::DurationNanosecondValue(v) => ValueRef::Duration(Duration::new_nanosecond(*v)),
|
||||||
|
ValueData::Decimal128Value(v) => {
|
||||||
|
// get precision and scale from datatype_extension
|
||||||
|
if let Some(TypeExt::DecimalType(d)) = datatype_ext
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|column_ext| column_ext.type_ext.as_ref())
|
||||||
|
{
|
||||||
|
ValueRef::Decimal128(Decimal128::from_value_precision_scale(
|
||||||
|
v.hi,
|
||||||
|
v.lo,
|
||||||
|
d.precision as u8,
|
||||||
|
d.scale as i8,
|
||||||
|
))
|
||||||
|
} else {
|
||||||
|
// If the precision and scale are not set, use the default value.
|
||||||
|
ValueRef::Decimal128(Decimal128::from_value_precision_scale(
|
||||||
|
v.hi,
|
||||||
|
v.lo,
|
||||||
|
DECIMAL128_MAX_PRECISION,
|
||||||
|
DECIMAL128_DEFAULT_SCALE,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -522,6 +698,11 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
|
|||||||
values.duration_nanosecond_values,
|
values.duration_nanosecond_values,
|
||||||
)),
|
)),
|
||||||
},
|
},
|
||||||
|
ConcreteDataType::Decimal128(d) => Arc::new(Decimal128Vector::from_values(
|
||||||
|
values.decimal128_values.iter().map(|x| {
|
||||||
|
Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into()
|
||||||
|
}),
|
||||||
|
)),
|
||||||
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
|
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
|
||||||
unreachable!()
|
unreachable!()
|
||||||
}
|
}
|
||||||
@@ -692,6 +873,18 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
             .into_iter()
             .map(|v| Value::Duration(Duration::new_nanosecond(v)))
             .collect(),
+        ConcreteDataType::Decimal128(d) => values
+            .decimal128_values
+            .into_iter()
+            .map(|v| {
+                Value::Decimal128(Decimal128::from_value_precision_scale(
+                    v.hi,
+                    v.lo,
+                    d.precision(),
+                    d.scale(),
+                ))
+            })
+            .collect(),
         ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
             unreachable!()
         }
@@ -704,12 +897,14 @@ pub fn is_semantic_type_eq(type_value: i32, semantic_type: SemanticType) -> bool
 }
 
 /// Returns true if the pb type value is valid.
-pub fn is_column_type_value_eq(type_value: i32, expect_type: &ConcreteDataType) -> bool {
-    let Some(column_type) = ColumnDataType::from_i32(type_value) else {
-        return false;
-    };
-
-    is_column_type_eq(column_type, expect_type)
+pub fn is_column_type_value_eq(
+    type_value: i32,
+    type_extension: Option<ColumnDataTypeExtension>,
+    expect_type: &ConcreteDataType,
+) -> bool {
+    ColumnDataTypeWrapper::try_new(type_value, type_extension)
+        .map(|wrapper| ConcreteDataType::from(wrapper) == *expect_type)
+        .unwrap_or(false)
 }
 
 /// Convert value into proto's value.
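The refactor above replaces a raw enum comparison with "build the wrapper (type id plus optional extension), decode it to a concrete type, compare". Below is a self-contained sketch of that pattern with stand-in types; the enum variants, the `(38, 10)` fallback precision/scale, and the numeric type ids are placeholders, not the crate's real API.

// Stand-in types only: illustrates the wrapper-based comparison pattern.
#[derive(Debug, PartialEq)]
enum Concrete {
    Boolean,
    Decimal128 { precision: u8, scale: i8 },
}

struct Wrapper {
    type_id: i32,
    decimal_ext: Option<(u8, i8)>,
}

impl Wrapper {
    fn try_new(type_id: i32, decimal_ext: Option<(u8, i8)>) -> Result<Self, String> {
        match type_id {
            0 | 1 => Ok(Self { type_id, decimal_ext }),
            other => Err(format!("unknown type id {other}")),
        }
    }

    fn to_concrete(&self) -> Concrete {
        match self.type_id {
            0 => Concrete::Boolean,
            _ => {
                // Placeholder defaults when no extension is attached.
                let (precision, scale) = self.decimal_ext.unwrap_or((38, 10));
                Concrete::Decimal128 { precision, scale }
            }
        }
    }
}

fn is_type_value_eq(type_id: i32, ext: Option<(u8, i8)>, expect: &Concrete) -> bool {
    Wrapper::try_new(type_id, ext)
        .map(|w| w.to_concrete() == *expect)
        .unwrap_or(false)
}

fn main() {
    assert!(is_type_value_eq(0, None, &Concrete::Boolean));
    assert!(is_type_value_eq(1, Some((10, 2)), &Concrete::Decimal128 { precision: 10, scale: 2 }));
    assert!(!is_type_value_eq(7, None, &Concrete::Boolean));
    println!("type comparison sketch ok");
}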
@@ -816,13 +1011,19 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
                 value_data: Some(ValueData::DurationNanosecondValue(v.value())),
             },
         },
+        Value::Decimal128(v) => {
+            let (hi, lo) = v.split_value();
+            v1::Value {
+                value_data: Some(ValueData::Decimal128Value(v1::Decimal128 { hi, lo })),
+            }
+        }
         Value::List(_) => return None,
     };
 
     Some(proto_value)
 }
 
-/// Returns the [ColumnDataType] of the value.
+/// Returns the [ColumnDataTypeWrapper] of the value.
 ///
 /// If value is null, returns `None`.
 pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
@@ -857,65 +1058,11 @@ pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
         ValueData::DurationMillisecondValue(_) => ColumnDataType::DurationMillisecond,
         ValueData::DurationMicrosecondValue(_) => ColumnDataType::DurationMicrosecond,
         ValueData::DurationNanosecondValue(_) => ColumnDataType::DurationNanosecond,
+        ValueData::Decimal128Value(_) => ColumnDataType::Decimal128,
     };
     Some(value_type)
 }
 
-/// Convert [ConcreteDataType] to [ColumnDataType].
-pub fn to_column_data_type(data_type: &ConcreteDataType) -> Option<ColumnDataType> {
-    let column_data_type = match data_type {
-        ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
-        ConcreteDataType::Int8(_) => ColumnDataType::Int8,
-        ConcreteDataType::Int16(_) => ColumnDataType::Int16,
-        ConcreteDataType::Int32(_) => ColumnDataType::Int32,
-        ConcreteDataType::Int64(_) => ColumnDataType::Int64,
-        ConcreteDataType::UInt8(_) => ColumnDataType::Uint8,
-        ConcreteDataType::UInt16(_) => ColumnDataType::Uint16,
-        ConcreteDataType::UInt32(_) => ColumnDataType::Uint32,
-        ConcreteDataType::UInt64(_) => ColumnDataType::Uint64,
-        ConcreteDataType::Float32(_) => ColumnDataType::Float32,
-        ConcreteDataType::Float64(_) => ColumnDataType::Float64,
-        ConcreteDataType::Binary(_) => ColumnDataType::Binary,
-        ConcreteDataType::String(_) => ColumnDataType::String,
-        ConcreteDataType::Date(_) => ColumnDataType::Date,
-        ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
-        ConcreteDataType::Timestamp(TimestampType::Second(_)) => ColumnDataType::TimestampSecond,
-        ConcreteDataType::Timestamp(TimestampType::Millisecond(_)) => {
-            ColumnDataType::TimestampMillisecond
-        }
-        ConcreteDataType::Timestamp(TimestampType::Microsecond(_)) => {
-            ColumnDataType::TimestampMicrosecond
-        }
-        ConcreteDataType::Timestamp(TimestampType::Nanosecond(_)) => {
-            ColumnDataType::TimestampNanosecond
-        }
-        ConcreteDataType::Time(TimeType::Second(_)) => ColumnDataType::TimeSecond,
-        ConcreteDataType::Time(TimeType::Millisecond(_)) => ColumnDataType::TimeMillisecond,
-        ConcreteDataType::Time(TimeType::Microsecond(_)) => ColumnDataType::TimeMicrosecond,
-        ConcreteDataType::Time(TimeType::Nanosecond(_)) => ColumnDataType::TimeNanosecond,
-        ConcreteDataType::Duration(DurationType::Second(_)) => ColumnDataType::DurationSecond,
-        ConcreteDataType::Duration(DurationType::Millisecond(_)) => {
-            ColumnDataType::DurationMillisecond
-        }
-        ConcreteDataType::Duration(DurationType::Microsecond(_)) => {
-            ColumnDataType::DurationMicrosecond
-        }
-        ConcreteDataType::Duration(DurationType::Nanosecond(_)) => {
-            ColumnDataType::DurationNanosecond
-        }
-        ConcreteDataType::Interval(IntervalType::YearMonth(_)) => ColumnDataType::IntervalYearMonth,
-        ConcreteDataType::Interval(IntervalType::MonthDayNano(_)) => {
-            ColumnDataType::IntervalMonthDayNano
-        }
-        ConcreteDataType::Interval(IntervalType::DayTime(_)) => ColumnDataType::IntervalDayTime,
-        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
-            return None
-        }
-    };
-
-    Some(column_data_type)
-}
-
 pub fn vectors_to_rows<'a>(
     columns: impl Iterator<Item = &'a VectorRef>,
     row_count: usize,
@@ -974,20 +1121,15 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
             TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
             TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
         }),
+        Value::Decimal128(v) => {
+            let (hi, lo) = v.split_value();
+            Some(ValueData::Decimal128Value(v1::Decimal128 { hi, lo }))
+        }
         Value::List(_) => unreachable!(),
         },
     }
 }
 
-/// Returns true if the column type is equal to expected type.
-fn is_column_type_eq(column_type: ColumnDataType, expect_type: &ConcreteDataType) -> bool {
-    if let Some(expect) = to_column_data_type(expect_type) {
-        column_type == expect
-    } else {
-        false
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use std::sync::Arc;
@@ -1081,189 +1223,204 @@ mod tests {
         let values = values_with_capacity(ColumnDataType::DurationMillisecond, 2);
         let values = values.duration_millisecond_values;
         assert_eq!(2, values.capacity());
 
+        let values = values_with_capacity(ColumnDataType::Decimal128, 2);
+        let values = values.decimal128_values;
+        assert_eq!(2, values.capacity());
     }
 
     #[test]
     fn test_concrete_datatype_from_column_datatype() {
         assert_eq!(
             ConcreteDataType::boolean_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Boolean).into()
+            ColumnDataTypeWrapper::boolean_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::int8_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Int8).into()
+            ColumnDataTypeWrapper::int8_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::int16_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Int16).into()
+            ColumnDataTypeWrapper::int16_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::int32_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Int32).into()
+            ColumnDataTypeWrapper::int32_datatype().into()
        );
         assert_eq!(
             ConcreteDataType::int64_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Int64).into()
+            ColumnDataTypeWrapper::int64_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::uint8_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Uint8).into()
+            ColumnDataTypeWrapper::uint8_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::uint16_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Uint16).into()
+            ColumnDataTypeWrapper::uint16_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::uint32_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Uint32).into()
+            ColumnDataTypeWrapper::uint32_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::uint64_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Uint64).into()
+            ColumnDataTypeWrapper::uint64_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::float32_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Float32).into()
+            ColumnDataTypeWrapper::float32_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::float64_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Float64).into()
+            ColumnDataTypeWrapper::float64_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::binary_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Binary).into()
+            ColumnDataTypeWrapper::binary_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::string_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::String).into()
+            ColumnDataTypeWrapper::string_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::date_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Date).into()
+            ColumnDataTypeWrapper::date_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::datetime_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Datetime).into()
+            ColumnDataTypeWrapper::datetime_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::timestamp_millisecond_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::TimestampMillisecond).into()
+            ColumnDataTypeWrapper::timestamp_millisecond_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::time_datatype(TimeUnit::Millisecond),
-            ColumnDataTypeWrapper(ColumnDataType::TimeMillisecond).into()
+            ColumnDataTypeWrapper::time_millisecond_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::interval_datatype(IntervalUnit::DayTime),
-            ColumnDataTypeWrapper(ColumnDataType::IntervalDayTime).into()
+            ColumnDataTypeWrapper::interval_day_time_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::interval_datatype(IntervalUnit::YearMonth),
-            ColumnDataTypeWrapper(ColumnDataType::IntervalYearMonth).into()
+            ColumnDataTypeWrapper::interval_year_month_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
-            ColumnDataTypeWrapper(ColumnDataType::IntervalMonthDayNano).into()
+            ColumnDataTypeWrapper::interval_month_day_nano_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::duration_millisecond_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::DurationMillisecond).into()
+            ColumnDataTypeWrapper::duration_millisecond_datatype().into()
+        );
+        assert_eq!(
+            ConcreteDataType::decimal128_datatype(10, 2),
+            ColumnDataTypeWrapper::decimal128_datatype(10, 2).into()
         )
     }
 
     #[test]
     fn test_column_datatype_from_concrete_datatype() {
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::Boolean),
+            ColumnDataTypeWrapper::boolean_datatype(),
             ConcreteDataType::boolean_datatype().try_into().unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::Int8),
+            ColumnDataTypeWrapper::int8_datatype(),
             ConcreteDataType::int8_datatype().try_into().unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::Int16),
+            ColumnDataTypeWrapper::int16_datatype(),
             ConcreteDataType::int16_datatype().try_into().unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::Int32),
+            ColumnDataTypeWrapper::int32_datatype(),
             ConcreteDataType::int32_datatype().try_into().unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::Int64),
+            ColumnDataTypeWrapper::int64_datatype(),
             ConcreteDataType::int64_datatype().try_into().unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::Uint8),
+            ColumnDataTypeWrapper::uint8_datatype(),
             ConcreteDataType::uint8_datatype().try_into().unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::Uint16),
+            ColumnDataTypeWrapper::uint16_datatype(),
             ConcreteDataType::uint16_datatype().try_into().unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::Uint32),
+            ColumnDataTypeWrapper::uint32_datatype(),
             ConcreteDataType::uint32_datatype().try_into().unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::Uint64),
+            ColumnDataTypeWrapper::uint64_datatype(),
             ConcreteDataType::uint64_datatype().try_into().unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::Float32),
+            ColumnDataTypeWrapper::float32_datatype(),
             ConcreteDataType::float32_datatype().try_into().unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::Float64),
+            ColumnDataTypeWrapper::float64_datatype(),
             ConcreteDataType::float64_datatype().try_into().unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::Binary),
+            ColumnDataTypeWrapper::binary_datatype(),
             ConcreteDataType::binary_datatype().try_into().unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::String),
+            ColumnDataTypeWrapper::string_datatype(),
             ConcreteDataType::string_datatype().try_into().unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::Date),
+            ColumnDataTypeWrapper::date_datatype(),
             ConcreteDataType::date_datatype().try_into().unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::Datetime),
+            ColumnDataTypeWrapper::datetime_datatype(),
             ConcreteDataType::datetime_datatype().try_into().unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::TimestampMillisecond),
+            ColumnDataTypeWrapper::timestamp_millisecond_datatype(),
             ConcreteDataType::timestamp_millisecond_datatype()
                 .try_into()
                 .unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::IntervalYearMonth),
+            ColumnDataTypeWrapper::interval_year_month_datatype(),
             ConcreteDataType::interval_datatype(IntervalUnit::YearMonth)
                 .try_into()
                 .unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::IntervalDayTime),
+            ColumnDataTypeWrapper::interval_day_time_datatype(),
             ConcreteDataType::interval_datatype(IntervalUnit::DayTime)
                 .try_into()
                 .unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::IntervalMonthDayNano),
+            ColumnDataTypeWrapper::interval_month_day_nano_datatype(),
             ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano)
                 .try_into()
                 .unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::DurationMillisecond),
+            ColumnDataTypeWrapper::duration_millisecond_datatype(),
             ConcreteDataType::duration_millisecond_datatype()
                 .try_into()
                 .unwrap()
         );
 
+        assert_eq!(
+            ColumnDataTypeWrapper::decimal128_datatype(10, 2),
+            ConcreteDataType::decimal128_datatype(10, 2)
+                .try_into()
+                .unwrap()
+        );
 
         let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
         assert!(result.is_err());
         assert_eq!(
@@ -1290,6 +1447,7 @@ mod tests {
             }),
             null_mask: vec![],
             datatype: 0,
+            ..Default::default()
         };
 
         let vector = Arc::new(TimestampNanosecondVector::from_vec(vec![1, 2, 3]));
@@ -1331,6 +1489,7 @@ mod tests {
             }),
             null_mask: vec![],
             datatype: 0,
+            ..Default::default()
         };
 
         let vector = Arc::new(TimeNanosecondVector::from_vec(vec![1, 2, 3]));
@@ -1372,6 +1531,7 @@ mod tests {
             }),
             null_mask: vec![],
             datatype: 0,
+            ..Default::default()
         };
 
         let vector = Arc::new(IntervalYearMonthVector::from_vec(vec![1, 2, 3]));
@@ -1416,6 +1576,7 @@ mod tests {
             }),
             null_mask: vec![],
             datatype: 0,
+            ..Default::default()
         };
 
         let vector = Arc::new(DurationNanosecondVector::from_vec(vec![1, 2, 3]));
@@ -1460,6 +1621,7 @@ mod tests {
             }),
             null_mask: vec![2],
             datatype: ColumnDataType::Boolean as i32,
+            ..Default::default()
         };
         let row_count = 4;
 
@@ -1617,17 +1779,17 @@ mod tests {
             &ConcreteDataType::Interval(IntervalType::MonthDayNano(IntervalMonthDayNanoType)),
             Values {
                 interval_month_day_nano_values: vec![
-                    IntervalMonthDayNano {
+                    v1::IntervalMonthDayNano {
                         months: 1,
                         days: 2,
                         nanoseconds: 3,
                     },
-                    IntervalMonthDayNano {
+                    v1::IntervalMonthDayNano {
                         months: 5,
                         days: 6,
                         nanoseconds: 7,
                     },
-                    IntervalMonthDayNano {
+                    v1::IntervalMonthDayNano {
                         months: 9,
                         days: 10,
                         nanoseconds: 11,
@@ -1859,4 +2021,33 @@ mod tests {
         assert_eq!(values[6], ValueData::DateValue(30));
         assert_eq!(values[7], ValueData::StringValue("c".to_string()));
     }
+
+    #[test]
+    fn test_is_column_type_value_eq() {
+        // test column type eq
+        let column1 = Column {
+            column_name: "test".to_string(),
+            semantic_type: 0,
+            values: Some(Values {
+                bool_values: vec![false, true, true],
+                ..Default::default()
+            }),
+            null_mask: vec![2],
+            datatype: ColumnDataType::Boolean as i32,
+            datatype_extension: None,
+        };
+        assert!(is_column_type_value_eq(
+            column1.datatype,
+            column1.datatype_extension,
+            &ConcreteDataType::boolean_datatype(),
+        ));
+    }
+
+    #[test]
+    fn test_convert_to_pb_decimal128() {
+        let decimal = Decimal128::new(123, 3, 1);
+        let pb_decimal = convert_to_pb_decimal128(decimal);
+        assert_eq!(pb_decimal.lo, 123);
+        assert_eq!(pb_decimal.hi, 0);
+    }
 }
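A quick worked check of the hi/lo expectations in `test_convert_to_pb_decimal128`, assuming `Decimal128::new(123, 3, 1)` stores 123 as the raw 128-bit value (precision 3, scale 1, i.e. 12.3): since 123 fits entirely in the low 64 bits, the split yields hi = 0 and lo = 123, which is exactly what the test asserts.

// Self-contained verification of that arithmetic.
fn main() {
    let value: i128 = 123;
    assert_eq!((value >> 64) as i64, 0); // hi
    assert_eq!(value as i64, 123); // lo
    println!("123 splits into hi = 0, lo = 123");
}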
@@ -22,7 +22,10 @@ use crate::helper::ColumnDataTypeWrapper;
 use crate::v1::ColumnDef;
 
 pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
-    let data_type = ColumnDataTypeWrapper::try_new(column_def.data_type)?;
+    let data_type = ColumnDataTypeWrapper::try_new(
+        column_def.data_type,
+        column_def.datatype_extension.clone(),
+    )?;
 
     let constraint = if column_def.default_constraint.is_empty() {
         None
@@ -4,7 +4,7 @@
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 //     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -4,7 +4,7 @@
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 //     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -4,7 +4,7 @@
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 //     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -4,7 +4,7 @@
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 //     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -4,13 +4,14 @@
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 //     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
 use secrecy::ExposeSecret;
 
 use crate::error::{
@@ -4,7 +4,7 @@
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 //     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -4,7 +4,7 @@
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 //     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -4,7 +4,7 @@
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 //     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -8,46 +8,45 @@ license.workspace = true
 testing = []
 
 [dependencies]
-api = { workspace = true }
+api.workspace = true
 arc-swap = "1.0"
 arrow-schema.workspace = true
 async-stream.workspace = true
 async-trait = "0.1"
-common-catalog = { workspace = true }
-common-error = { workspace = true }
-common-grpc = { workspace = true }
-common-macro = { workspace = true }
-common-meta = { workspace = true }
-common-query = { workspace = true }
-common-recordbatch = { workspace = true }
-common-runtime = { workspace = true }
-common-telemetry = { workspace = true }
-common-time = { workspace = true }
+common-catalog.workspace = true
+common-error.workspace = true
+common-grpc.workspace = true
+common-macro.workspace = true
+common-meta.workspace = true
+common-query.workspace = true
+common-recordbatch.workspace = true
+common-runtime.workspace = true
+common-telemetry.workspace = true
+common-time.workspace = true
 dashmap = "5.4"
 datafusion.workspace = true
-datatypes = { workspace = true }
+datatypes.workspace = true
 futures = "0.3"
 futures-util.workspace = true
 lazy_static.workspace = true
-meta-client = { workspace = true }
-metrics.workspace = true
+meta-client.workspace = true
 moka = { workspace = true, features = ["future"] }
 parking_lot = "0.12"
 partition.workspace = true
+prometheus.workspace = true
 regex.workspace = true
 serde.workspace = true
 serde_json = "1.0"
-session = { workspace = true }
-snafu = { version = "0.7", features = ["backtraces"] }
-store-api = { workspace = true }
-table = { workspace = true }
+session.workspace = true
+snafu.workspace = true
+store-api.workspace = true
+table.workspace = true
 tokio.workspace = true
 
 [dev-dependencies]
 catalog = { workspace = true, features = ["testing"] }
 chrono.workspace = true
-common-test-util = { workspace = true }
-log-store = { workspace = true }
-object-store = { workspace = true }
-storage = { workspace = true }
+common-test-util.workspace = true
+log-store.workspace = true
+object-store.workspace = true
 tokio.workspace = true
@@ -180,7 +180,7 @@ pub enum Error {
         source: table::error::Error,
     },
 
-    #[snafu(display(""))]
+    #[snafu(display("Internal error"))]
     Internal {
         location: Location,
         source: BoxedError,
@@ -216,7 +216,7 @@ pub enum Error {
     #[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
     QueryAccessDenied { catalog: String, schema: String },
 
-    #[snafu(display(""))]
+    #[snafu(display("DataFusion error"))]
     Datafusion {
         #[snafu(source)]
         error: DataFusionError,
@@ -202,7 +202,7 @@ impl InformationSchemaColumnsBuilder {
                 &schema_name,
                 &table_name,
                 &column.name,
-                column.data_type.name(),
+                &column.data_type.name(),
                 semantic_type,
             );
         }
@@ -25,11 +25,10 @@ use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
 use common_meta::rpc::store::{
     BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
     BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
-    DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
-    RangeRequest, RangeResponse,
+    DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
 };
 use common_meta::rpc::KeyValue;
-use common_telemetry::{debug, timer};
+use common_telemetry::debug;
 use meta_client::client::MetaClient;
 use moka::future::{Cache, CacheBuilder};
 use snafu::{OptionExt, ResultExt};
@@ -152,25 +151,11 @@ impl KvBackend for CachedMetaKvBackend {
         }
     }
 
-    async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse> {
-        let from_key = &req.from_key.clone();
-        let to_key = &req.to_key.clone();
-
-        let ret = self.kv_backend.move_value(req).await;
-
-        if ret.is_ok() {
-            self.invalidate_key(from_key).await;
-            self.invalidate_key(to_key).await;
-        }
-
-        ret
-    }
-
     async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
-        let _timer = timer!(METRIC_CATALOG_KV_GET);
+        let _timer = METRIC_CATALOG_KV_GET.start_timer();
 
         let init = async {
-            let _timer = timer!(METRIC_CATALOG_KV_REMOTE_GET);
+            let _timer = METRIC_CATALOG_KV_REMOTE_GET.start_timer();
             self.kv_backend.get(key).await.map(|val| {
                 val.with_context(|| CacheNotGetSnafu {
                     key: String::from_utf8_lossy(key),
@@ -319,14 +304,6 @@ impl KvBackend for MetaKvBackend {
             .context(ExternalSnafu)
     }
 
-    async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse> {
-        self.client
-            .move_value(req)
-            .await
-            .map_err(BoxedError::new)
-            .context(ExternalSnafu)
-    }
-
     fn as_any(&self) -> &dyn Any {
         self
     }
@@ -19,7 +19,6 @@ use std::sync::{Arc, Weak};
 use common_catalog::consts::{DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID};
 use common_error::ext::BoxedError;
 use common_meta::cache_invalidator::{CacheInvalidator, CacheInvalidatorRef, Context};
-use common_meta::datanode_manager::DatanodeManagerRef;
 use common_meta::error::Result as MetaResult;
 use common_meta::key::catalog_name::CatalogNameKey;
 use common_meta::key::schema_name::SchemaNameKey;
@@ -55,7 +54,6 @@ pub struct KvBackendCatalogManager {
     cache_invalidator: CacheInvalidatorRef,
     partition_manager: PartitionRuleManagerRef,
     table_metadata_manager: TableMetadataManagerRef,
-    datanode_manager: DatanodeManagerRef,
     /// A sub-CatalogManager that handles system tables
     system_catalog: SystemCatalog,
 }
@@ -76,16 +74,11 @@ impl CacheInvalidator for KvBackendCatalogManager {
     }
 }
 
 impl KvBackendCatalogManager {
-    pub fn new(
-        backend: KvBackendRef,
-        cache_invalidator: CacheInvalidatorRef,
-        datanode_manager: DatanodeManagerRef,
-    ) -> Arc<Self> {
+    pub fn new(backend: KvBackendRef, cache_invalidator: CacheInvalidatorRef) -> Arc<Self> {
         Arc::new_cyclic(|me| Self {
             partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
             table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
             cache_invalidator,
-            datanode_manager,
             system_catalog: SystemCatalog {
                 catalog_manager: me.clone(),
             },
@@ -99,10 +92,6 @@ impl KvBackendCatalogManager {
     pub fn table_metadata_manager_ref(&self) -> &TableMetadataManagerRef {
         &self.table_metadata_manager
     }
 
-    pub fn datanode_manager(&self) -> DatanodeManagerRef {
-        self.datanode_manager.clone()
-    }
 }
 
 #[async_trait::async_trait]
@@ -17,8 +17,8 @@ use std::collections::hash_map::Entry;
 use std::collections::HashMap;
 use std::sync::{Arc, RwLock, Weak};
 
+use common_catalog::build_db_string;
 use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME};
-use metrics::{decrement_gauge, increment_gauge};
 use snafu::OptionExt;
 use table::TableRef;
 
@@ -166,7 +166,7 @@ impl MemoryCatalogManager {
                 let arc_self = Arc::new(self.clone());
                 let catalog = arc_self.create_catalog_entry(name);
                 e.insert(catalog);
-                increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
+                crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT.inc();
                 Ok(true)
             }
             Entry::Occupied(_) => Ok(false),
@@ -187,11 +187,9 @@ impl MemoryCatalogManager {
            })?;
        let result = schema.remove(&request.table_name);
        if result.is_some() {
-            decrement_gauge!(
-                crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
-                1.0,
-                &[crate::metrics::db_label(&request.catalog, &request.schema)],
-            );
+            crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT
+                .with_label_values(&[build_db_string(&request.catalog, &request.schema).as_str()])
+                .dec();
         }
         Ok(())
     }
@@ -210,7 +208,7 @@ impl MemoryCatalogManager {
         match catalog.entry(request.schema) {
             Entry::Vacant(e) => {
                 e.insert(HashMap::new());
-                increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
+                crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT.inc();
                 Ok(true)
             }
             Entry::Occupied(_) => Ok(false),
@@ -238,11 +236,9 @@ impl MemoryCatalogManager {
                 .fail();
         }
         schema.insert(request.table_name, request.table);
-        increment_gauge!(
-            crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
-            1.0,
-            &[crate::metrics::db_label(&request.catalog, &request.schema)],
-        );
+        crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT
+            .with_label_values(&[build_db_string(&request.catalog, &request.schema).as_str()])
+            .inc();
         Ok(true)
     }
 
@@ -12,18 +12,24 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use common_catalog::build_db_string;
+use lazy_static::lazy_static;
+use prometheus::*;
 
 pub(crate) const METRIC_DB_LABEL: &str = "db";
 
-pub(crate) const METRIC_CATALOG_MANAGER_CATALOG_COUNT: &str = "catalog.catalog_count";
-pub(crate) const METRIC_CATALOG_MANAGER_SCHEMA_COUNT: &str = "catalog.schema_count";
-pub(crate) const METRIC_CATALOG_MANAGER_TABLE_COUNT: &str = "catalog.table_count";
-
-pub(crate) const METRIC_CATALOG_KV_REMOTE_GET: &str = "catalog.kv.get.remote";
-pub(crate) const METRIC_CATALOG_KV_GET: &str = "catalog.kv.get";
-
-#[inline]
-pub(crate) fn db_label(catalog: &str, schema: &str) -> (&'static str, String) {
-    (METRIC_DB_LABEL, build_db_string(catalog, schema))
+lazy_static! {
+    pub static ref METRIC_CATALOG_MANAGER_CATALOG_COUNT: IntGauge =
+        register_int_gauge!("catalog_catalog_count", "catalog catalog count").unwrap();
+    pub static ref METRIC_CATALOG_MANAGER_SCHEMA_COUNT: IntGauge =
+        register_int_gauge!("catalog_schema_count", "catalog schema count").unwrap();
+    pub static ref METRIC_CATALOG_MANAGER_TABLE_COUNT: IntGaugeVec = register_int_gauge_vec!(
+        "catalog_table_count",
+        "catalog table count",
+        &[METRIC_DB_LABEL]
+    )
+    .unwrap();
+    pub static ref METRIC_CATALOG_KV_REMOTE_GET: Histogram =
+        register_histogram!("catalog_kv_get_remote", "catalog kv get remote").unwrap();
+    pub static ref METRIC_CATALOG_KV_GET: Histogram =
+        register_histogram!("catalog_kv_get", "catalog kv get").unwrap();
 }
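The hunks above migrate the catalog crate from the `metrics` facade macros to `prometheus` statics declared with `lazy_static`. A minimal, self-contained sketch of the resulting usage pattern, assuming the `prometheus` and `lazy_static` dependencies this diff adds (metric names and the "greptime-public" label value below are placeholders): gauges are bumped with `.inc()` / `.dec()`, per-database gauges go through `with_label_values`, and histograms time a scope via the guard returned by `start_timer()`.

use lazy_static::lazy_static;
use prometheus::{
    register_histogram, register_int_gauge, register_int_gauge_vec, Histogram, IntGauge,
    IntGaugeVec,
};

lazy_static! {
    static ref TABLE_COUNT: IntGaugeVec =
        register_int_gauge_vec!("example_table_count", "tables per db", &["db"]).unwrap();
    static ref CATALOG_COUNT: IntGauge =
        register_int_gauge!("example_catalog_count", "catalog count").unwrap();
    static ref KV_GET: Histogram =
        register_histogram!("example_kv_get", "kv get latency").unwrap();
}

fn main() {
    CATALOG_COUNT.inc();
    TABLE_COUNT.with_label_values(&["greptime-public"]).inc();

    {
        // The timer records the elapsed time when the guard is dropped.
        let _timer = KV_GET.start_timer();
        // ... do the work being measured ...
    }

    TABLE_COUNT.with_label_values(&["greptime-public"]).dec();
    println!("tables: {}", TABLE_COUNT.with_label_values(&["greptime-public"]).get());
}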
@@ -8,44 +8,45 @@ license.workspace = true
 testing = []
 
 [dependencies]
-api = { workspace = true }
+api.workspace = true
 arrow-flight.workspace = true
 async-stream.workspace = true
 async-trait.workspace = true
-common-base = { workspace = true }
-common-catalog = { workspace = true }
-common-error = { workspace = true }
-common-grpc = { workspace = true }
-common-macro = { workspace = true }
-common-meta = { workspace = true }
-common-query = { workspace = true }
-common-recordbatch = { workspace = true }
-common-telemetry = { workspace = true }
-common-time = { workspace = true }
+common-base.workspace = true
+common-catalog.workspace = true
+common-error.workspace = true
+common-grpc.workspace = true
+common-macro.workspace = true
+common-meta.workspace = true
+common-query.workspace = true
+common-recordbatch.workspace = true
+common-telemetry.workspace = true
+common-time.workspace = true
 datafusion.workspace = true
-datatypes = { workspace = true }
+datatypes.workspace = true
 derive_builder.workspace = true
 enum_dispatch = "0.3"
 futures-util.workspace = true
+lazy_static.workspace = true
 moka = { workspace = true, features = ["future"] }
 parking_lot = "0.12"
+prometheus.workspace = true
 prost.workspace = true
 rand.workspace = true
-session = { workspace = true }
+session.workspace = true
 snafu.workspace = true
 tokio-stream = { version = "0.1", features = ["net"] }
 tokio.workspace = true
 tonic.workspace = true
 
 [dev-dependencies]
-common-grpc-expr = { workspace = true }
-datanode = { workspace = true }
+common-grpc-expr.workspace = true
+datanode.workspace = true
 derive-new = "0.5"
-prost.workspace = true
-substrait = { workspace = true }
+substrait.workspace = true
 tracing = "0.1"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }
 
 [dev-dependencies.substrait_proto]
 package = "substrait"
-version = "0.7"
+version = "0.17"
@@ -46,6 +46,7 @@ async fn run() {
             default_constraint: vec![],
             semantic_type: SemanticType::Timestamp as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "key".to_string(),
@@ -54,6 +55,7 @@ async fn run() {
             default_constraint: vec![],
             semantic_type: SemanticType::Tag as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "value".to_string(),
@@ -62,6 +64,7 @@ async fn run() {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
     ],
     time_index: "timestamp".to_string(),
@@ -78,7 +81,7 @@ async fn run() {
 
     let logical = mock_logical_plan();
     event!(Level::INFO, "plan size: {:#?}", logical.len());
-    let result = db.logical_plan(logical, 0).await.unwrap();
+    let result = db.logical_plan(logical).await.unwrap();
 
     event!(Level::INFO, "result: {:#?}", result);
 }
@@ -28,7 +28,8 @@ use common_grpc::flight::{FlightDecoder, FlightMessage};
 use common_query::Output;
 use common_recordbatch::error::ExternalSnafu;
 use common_recordbatch::RecordBatchStreamAdaptor;
-use common_telemetry::{logging, timer};
+use common_telemetry::logging;
+use common_telemetry::tracing_context::W3cTrace;
 use futures_util::StreamExt;
 use prost::Message;
 use snafu::{ensure, ResultExt};
@@ -111,12 +112,12 @@ impl Database {
     }
 
     pub async fn insert(&self, requests: InsertRequests) -> Result<u32> {
-        let _timer = timer!(metrics::METRIC_GRPC_INSERT);
+        let _timer = metrics::METRIC_GRPC_INSERT.start_timer();
         self.handle(Request::Inserts(requests)).await
     }
 
     pub async fn row_insert(&self, requests: RowInsertRequests) -> Result<u32> {
-        let _timer = timer!(metrics::METRIC_GRPC_INSERT);
+        let _timer = metrics::METRIC_GRPC_INSERT.start_timer();
         self.handle(Request::RowInserts(requests)).await
     }
 
@@ -141,27 +142,27 @@ impl Database {
     }
 
     pub async fn delete(&self, request: DeleteRequests) -> Result<u32> {
-        let _timer = timer!(metrics::METRIC_GRPC_DELETE);
+        let _timer = metrics::METRIC_GRPC_DELETE.start_timer();
         self.handle(Request::Deletes(request)).await
     }
 
     async fn handle(&self, request: Request) -> Result<u32> {
         let mut client = self.client.make_database_client()?.inner;
-        let request = self.to_rpc_request(request, 0);
+        let request = self.to_rpc_request(request);
         let response = client.handle(request).await?.into_inner();
         from_grpc_response(response)
     }
 
     #[inline]
-    fn to_rpc_request(&self, request: Request, trace_id: u64) -> GreptimeRequest {
+    fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
         GreptimeRequest {
             header: Some(RequestHeader {
                 catalog: self.catalog.clone(),
                 schema: self.schema.clone(),
                 authorization: self.ctx.auth_header.clone(),
                 dbname: self.dbname.clone(),
-                trace_id,
-                span_id: 0,
+                // TODO(Taylor-lagrange): add client grpc tracing
+                tracing_context: W3cTrace::new(),
             }),
             request: Some(request),
         }
@@ -171,24 +172,18 @@ impl Database {
     where
         S: AsRef<str>,
     {
-        let _timer = timer!(metrics::METRIC_GRPC_SQL);
-        self.do_get(
-            Request::Query(QueryRequest {
-                query: Some(Query::Sql(sql.as_ref().to_string())),
-            }),
-            0,
-        )
+        let _timer = metrics::METRIC_GRPC_SQL.start_timer();
+        self.do_get(Request::Query(QueryRequest {
+            query: Some(Query::Sql(sql.as_ref().to_string())),
+        }))
        .await
     }
 
-    pub async fn logical_plan(&self, logical_plan: Vec<u8>, trace_id: u64) -> Result<Output> {
-        let _timer = timer!(metrics::METRIC_GRPC_LOGICAL_PLAN);
-        self.do_get(
-            Request::Query(QueryRequest {
-                query: Some(Query::LogicalPlan(logical_plan)),
-            }),
-            trace_id,
-        )
+    pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
+        let _timer = metrics::METRIC_GRPC_LOGICAL_PLAN.start_timer();
+        self.do_get(Request::Query(QueryRequest {
+            query: Some(Query::LogicalPlan(logical_plan)),
+        }))
        .await
     }
 
@@ -199,69 +194,54 @@ impl Database {
         end: &str,
         step: &str,
     ) -> Result<Output> {
-        let _timer = timer!(metrics::METRIC_GRPC_PROMQL_RANGE_QUERY);
-        self.do_get(
-            Request::Query(QueryRequest {
-                query: Some(Query::PromRangeQuery(PromRangeQuery {
-                    query: promql.to_string(),
-                    start: start.to_string(),
-                    end: end.to_string(),
-                    step: step.to_string(),
-                })),
-            }),
-            0,
-        )
+        let _timer = metrics::METRIC_GRPC_PROMQL_RANGE_QUERY.start_timer();
+        self.do_get(Request::Query(QueryRequest {
+            query: Some(Query::PromRangeQuery(PromRangeQuery {
+                query: promql.to_string(),
+                start: start.to_string(),
+                end: end.to_string(),
+                step: step.to_string(),
+            })),
+        }))
        .await
     }
 
     pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
-        let _timer = timer!(metrics::METRIC_GRPC_CREATE_TABLE);
-        self.do_get(
-            Request::Ddl(DdlRequest {
-                expr: Some(DdlExpr::CreateTable(expr)),
-            }),
-            0,
-        )
+        let _timer = metrics::METRIC_GRPC_CREATE_TABLE.start_timer();
+        self.do_get(Request::Ddl(DdlRequest {
+            expr: Some(DdlExpr::CreateTable(expr)),
+        }))
        .await
     }
 
     pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
-        let _timer = timer!(metrics::METRIC_GRPC_ALTER);
-        self.do_get(
-            Request::Ddl(DdlRequest {
-                expr: Some(DdlExpr::Alter(expr)),
-            }),
-            0,
-        )
+        let _timer = metrics::METRIC_GRPC_ALTER.start_timer();
+        self.do_get(Request::Ddl(DdlRequest {
+            expr: Some(DdlExpr::Alter(expr)),
+        }))
        .await
     }
 
     pub async fn drop_table(&self, expr: DropTableExpr) -> Result<Output> {
-        let _timer = timer!(metrics::METRIC_GRPC_DROP_TABLE);
-        self.do_get(
-            Request::Ddl(DdlRequest {
-                expr: Some(DdlExpr::DropTable(expr)),
-            }),
-            0,
-        )
+        let _timer = metrics::METRIC_GRPC_DROP_TABLE.start_timer();
+        self.do_get(Request::Ddl(DdlRequest {
+            expr: Some(DdlExpr::DropTable(expr)),
+        }))
        .await
     }
 
     pub async fn truncate_table(&self, expr: TruncateTableExpr) -> Result<Output> {
-        let _timer = timer!(metrics::METRIC_GRPC_TRUNCATE_TABLE);
-        self.do_get(
-            Request::Ddl(DdlRequest {
-                expr: Some(DdlExpr::TruncateTable(expr)),
-            }),
-            0,
-        )
+        let _timer = metrics::METRIC_GRPC_TRUNCATE_TABLE.start_timer();
+        self.do_get(Request::Ddl(DdlRequest {
+            expr: Some(DdlExpr::TruncateTable(expr)),
+        }))
        .await
     }
 
-    async fn do_get(&self, request: Request, trace_id: u64) -> Result<Output> {
+    async fn do_get(&self, request: Request) -> Result<Output> {
         // FIXME(paomian): should be added some labels for metrics
|
||||||
let _timer = timer!(metrics::METRIC_GRPC_DO_GET);
|
let _timer = metrics::METRIC_GRPC_DO_GET.start_timer();
|
||||||
let request = self.to_rpc_request(request, trace_id);
|
let request = self.to_rpc_request(request);
|
||||||
let request = Ticket {
|
let request = Ticket {
|
||||||
ticket: request.encode_to_vec().into(),
|
ticket: request.encode_to_vec().into(),
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -12,15 +12,34 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! client metrics
-pub const METRIC_GRPC_CREATE_TABLE: &str = "grpc.create_table";
-pub const METRIC_GRPC_PROMQL_RANGE_QUERY: &str = "grpc.promql.range_query";
-pub const METRIC_GRPC_INSERT: &str = "grpc.insert";
-pub const METRIC_GRPC_DELETE: &str = "grpc.delete";
-pub const METRIC_GRPC_SQL: &str = "grpc.sql";
-pub const METRIC_GRPC_LOGICAL_PLAN: &str = "grpc.logical_plan";
-pub const METRIC_GRPC_ALTER: &str = "grpc.alter";
-pub const METRIC_GRPC_DROP_TABLE: &str = "grpc.drop_table";
-pub const METRIC_GRPC_TRUNCATE_TABLE: &str = "grpc.truncate_table";
-pub const METRIC_GRPC_DO_GET: &str = "grpc.do_get";
-pub(crate) const METRIC_REGION_REQUEST_GRPC: &str = "grpc.region_request";
+use lazy_static::lazy_static;
+use prometheus::*;
+
+lazy_static! {
+    pub static ref METRIC_GRPC_CREATE_TABLE: Histogram =
+        register_histogram!("grpc_create_table", "grpc create table").unwrap();
+    pub static ref METRIC_GRPC_PROMQL_RANGE_QUERY: Histogram =
+        register_histogram!("grpc_promql_range_query", "grpc promql range query").unwrap();
+    pub static ref METRIC_GRPC_INSERT: Histogram =
+        register_histogram!("grpc_insert", "grpc insert").unwrap();
+    pub static ref METRIC_GRPC_DELETE: Histogram =
+        register_histogram!("grpc_delete", "grpc delete").unwrap();
+    pub static ref METRIC_GRPC_SQL: Histogram =
+        register_histogram!("grpc_sql", "grpc sql").unwrap();
+    pub static ref METRIC_GRPC_LOGICAL_PLAN: Histogram =
+        register_histogram!("grpc_logical_plan", "grpc logical plan").unwrap();
+    pub static ref METRIC_GRPC_ALTER: Histogram =
+        register_histogram!("grpc_alter", "grpc alter").unwrap();
+    pub static ref METRIC_GRPC_DROP_TABLE: Histogram =
+        register_histogram!("grpc_drop_table", "grpc drop table").unwrap();
+    pub static ref METRIC_GRPC_TRUNCATE_TABLE: Histogram =
+        register_histogram!("grpc_truncate_table", "grpc truncate table").unwrap();
+    pub static ref METRIC_GRPC_DO_GET: Histogram =
+        register_histogram!("grpc_do_get", "grpc do get").unwrap();
+    pub static ref METRIC_REGION_REQUEST_GRPC: HistogramVec = register_histogram_vec!(
+        "grpc_region_request",
+        "grpc region request",
+        &["request_type"]
+    )
+    .unwrap();
+}
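For readers unfamiliar with the prometheus crate adopted above: a Histogram created through register_histogram! lives in the crate's default registry, and the guard returned by start_timer() records the elapsed time into the histogram when it is dropped, which is why the call sites only bind the guard to a local such as _timer. A minimal, self-contained sketch, with a metric name and function that are purely illustrative and not part of this change set:

use lazy_static::lazy_static;
use prometheus::{register_histogram, Histogram};

lazy_static! {
    // Hypothetical metric, for illustration only.
    static ref DEMO_LATENCY: Histogram =
        register_histogram!("demo_latency_seconds", "latency of a demo operation").unwrap();
}

fn do_work() {
    // The timer observes the elapsed seconds into the histogram when dropped.
    let _timer = DEMO_LATENCY.start_timer();
    // ... the work being measured ...
} // `_timer` is dropped here and the duration is recorded.

Note that binding the guard to `_` instead of `_timer` would drop it immediately and record a near-zero duration, so the `_timer` naming in the diff is deliberate.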
@@ -24,7 +24,7 @@ use common_meta::datanode_manager::{AffectedRows, Datanode};
 use common_meta::error::{self as meta_error, Result as MetaResult};
 use common_recordbatch::error::ExternalSnafu;
 use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
-use common_telemetry::{error, timer};
+use common_telemetry::error;
 use prost::Message;
 use snafu::{location, Location, OptionExt, ResultExt};
 use tokio_stream::StreamExt;
@@ -152,11 +152,9 @@ impl RegionRequester {
             .with_context(|| MissingFieldSnafu { field: "body" })?
             .as_ref()
             .to_string();
-
-        let _timer = timer!(
-            metrics::METRIC_REGION_REQUEST_GRPC,
-            &[("request_type", request_type)]
-        );
+        let _timer = metrics::METRIC_REGION_REQUEST_GRPC
+            .with_label_values(&[request_type.as_str()])
+            .start_timer();
 
         let mut client = self.client.raw_region_client()?;
 
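The region-request metric above is a HistogramVec rather than a plain Histogram, so each observation first selects a labelled child with with_label_values and then starts the timer. A short sketch of that pattern under the same single "request_type" label as in the diff; the metric name and function here are illustrative only:

use lazy_static::lazy_static;
use prometheus::{register_histogram_vec, HistogramVec};

lazy_static! {
    static ref REQUEST_LATENCY: HistogramVec = register_histogram_vec!(
        "demo_region_request",   // illustrative name
        "latency by request type",
        &["request_type"]
    )
    .unwrap();
}

async fn handle(request_type: &str) {
    // One timer per labelled child; the observation lands in that child's series.
    let _timer = REQUEST_LATENCY
        .with_label_values(&[request_type])
        .start_timer();
    // ... handle the request ...
}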
@@ -10,71 +10,69 @@ name = "greptime"
 path = "src/bin/greptime.rs"
 
 [features]
-default = ["metrics-process"]
 tokio-console = ["common-telemetry/tokio-console"]
-metrics-process = ["servers/metrics-process"]
 
 [dependencies]
 anymap = "1.0.0-beta.2"
 async-trait.workspace = true
 auth.workspace = true
-catalog = { workspace = true }
+catalog.workspace = true
 chrono.workspace = true
-clap = { version = "3.1", features = ["derive"] }
-client = { workspace = true }
-common-base = { workspace = true }
-common-catalog = { workspace = true }
-common-config = { workspace = true }
-common-error = { workspace = true }
-common-macro = { workspace = true }
-common-meta = { workspace = true }
-common-procedure = { workspace = true }
-common-query = { workspace = true }
-common-recordbatch = { workspace = true }
+clap = { version = "4.4", features = ["derive"] }
+client.workspace = true
+common-base.workspace = true
+common-catalog.workspace = true
+common-config.workspace = true
+common-error.workspace = true
+common-macro.workspace = true
+common-meta.workspace = true
+common-procedure.workspace = true
+common-query.workspace = true
+common-recordbatch.workspace = true
 common-telemetry = { workspace = true, features = [
     "deadlock_detection",
 ] }
 config = "0.13"
-datanode = { workspace = true }
-datatypes = { workspace = true }
+datanode.workspace = true
+datatypes.workspace = true
 either = "1.8"
 etcd-client.workspace = true
-file-engine = { workspace = true }
-frontend = { workspace = true }
+file-engine.workspace = true
+frontend.workspace = true
 futures.workspace = true
 lazy_static.workspace = true
-meta-client = { workspace = true }
-meta-srv = { workspace = true }
-metrics.workspace = true
-mito2 = { workspace = true }
+meta-client.workspace = true
+meta-srv.workspace = true
+mito2.workspace = true
 nu-ansi-term = "0.46"
-partition = { workspace = true }
+partition.workspace = true
 plugins.workspace = true
+prometheus.workspace = true
 prost.workspace = true
-query = { workspace = true }
+query.workspace = true
 rand.workspace = true
 regex.workspace = true
 rustyline = "10.1"
 serde.workspace = true
 serde_json.workspace = true
-servers = { workspace = true }
-session = { workspace = true }
+servers.workspace = true
+session.workspace = true
 snafu.workspace = true
-substrait = { workspace = true }
-table = { workspace = true }
+substrait.workspace = true
+table.workspace = true
 tokio.workspace = true
+toml.workspace = true
 
 [target.'cfg(not(windows))'.dependencies]
 tikv-jemallocator = "0.5"
 
 [dev-dependencies]
-common-test-util = { workspace = true }
+common-test-util.workspace = true
 serde.workspace = true
 temp-env = "0.3"
-toml.workspace = true
 
 [target.'cfg(not(windows))'.dev-dependencies]
 rexpect = "0.5"
 
 [build-dependencies]
-common-version = { workspace = true }
+common-version.workspace = true
@@ -21,7 +21,11 @@ use cmd::error::Result;
 use cmd::options::{Options, TopLevelOptions};
 use cmd::{cli, datanode, frontend, metasrv, standalone};
 use common_telemetry::logging::{error, info, TracingOptions};
-use metrics::gauge;
+
+lazy_static::lazy_static! {
+    static ref APP_VERSION: prometheus::IntGaugeVec =
+        prometheus::register_int_gauge_vec!("app_version", "app version", &["short_version", "version"]).unwrap();
+}
 
 #[derive(Parser)]
 #[clap(name = "greptimedb", version = print_version())]
@@ -204,11 +208,13 @@ async fn main() -> Result<()> {
     };
 
     common_telemetry::set_panic_hook();
-    common_telemetry::init_default_metrics_recorder();
-    let _guard = common_telemetry::init_global_logging(app_name, logging_opts, tracing_opts);
+    let _guard =
+        common_telemetry::init_global_logging(app_name, logging_opts, tracing_opts, opts.node_id());
 
     // Report app version as gauge.
-    gauge!("app_version", 1.0, "short_version" => short_version(), "version" => full_version());
+    APP_VERSION
+        .with_label_values(&[short_version(), full_version()])
+        .inc();
 
     // Log version and argument flags.
     info!(
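Because register_int_gauge_vec! puts the gauge into the prometheus crate's default registry, the app_version series above becomes visible wherever that registry is gathered and encoded. A hedged sketch of that export path, with illustrative names and a made-up version value:

use prometheus::{register_int_gauge_vec, Encoder, IntGaugeVec, TextEncoder};

fn export_metrics() -> String {
    // Illustrative gauge; the real binary registers "app_version" at startup instead.
    let demo: IntGaugeVec =
        register_int_gauge_vec!("demo_app_version", "demo app version", &["version"]).unwrap();
    demo.with_label_values(&["0.0.0-demo"]).inc();

    // Gather everything registered in the default registry and render it as Prometheus text.
    let mut buffer = Vec::new();
    TextEncoder::new()
        .encode(&prometheus::gather(), &mut buffer)
        .unwrap();
    String::from_utf8(buffer).unwrap()
}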
@@ -20,14 +20,13 @@ use std::time::Duration;
 use async_trait::async_trait;
 use clap::Parser;
 use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
+use common_meta::kv_backend::etcd::EtcdStore;
 use common_meta::peer::Peer;
 use common_meta::rpc::router::{Region, RegionRoute};
 use common_meta::table_name::TableName;
 use common_telemetry::info;
 use datatypes::data_type::ConcreteDataType;
 use datatypes::schema::{ColumnSchema, RawSchema};
-use meta_srv::service::store::etcd::EtcdStore;
-use meta_srv::service::store::kv::KvBackendAdapter;
 use rand::Rng;
 use table::metadata::{RawTableInfo, RawTableMeta, TableId, TableIdent, TableType};
 
@@ -64,9 +63,7 @@ impl BenchTableMetadataCommand {
     pub async fn build(&self) -> Result<Instance> {
         let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr]).await.unwrap();
 
-        let table_metadata_manager = Arc::new(TableMetadataManager::new(KvBackendAdapter::wrap(
-            etcd_store,
-        )));
+        let table_metadata_manager = Arc::new(TableMetadataManager::new(etcd_store));
 
         let tool = BenchTableMetadata {
             table_metadata_manager,
@@ -157,6 +154,7 @@ fn create_region_routes() -> Vec<RegionRoute> {
                 addr: String::new(),
             }),
             follower_peers: vec![],
+            leader_status: None,
         });
     }
 
@@ -17,6 +17,8 @@ use std::sync::Arc;
 
 use async_trait::async_trait;
 use clap::{Parser, ValueEnum};
+use client::api::v1::auth_header::AuthScheme;
+use client::api::v1::Basic;
 use client::{Client, Database, DEFAULT_SCHEMA_NAME};
 use common_query::Output;
 use common_recordbatch::util::collect;
@@ -25,13 +27,14 @@ use datatypes::scalars::ScalarVector;
 use datatypes::vectors::{StringVector, Vector};
 use snafu::{OptionExt, ResultExt};
 use tokio::fs::File;
-use tokio::io::AsyncWriteExt;
+use tokio::io::{AsyncWriteExt, BufWriter};
 use tokio::sync::Semaphore;
 
 use crate::cli::{Instance, Tool};
 use crate::error::{
     CollectRecordBatchesSnafu, ConnectServerSnafu, EmptyResultSnafu, Error, FileIoSnafu,
-    InvalidDatabaseNameSnafu, NotDataFromOutputSnafu, RequestDatabaseSnafu, Result,
+    IllegalConfigSnafu, InvalidDatabaseNameSnafu, NotDataFromOutputSnafu, RequestDatabaseSnafu,
+    Result,
 };
 
 type TableReference = (String, String, String);
@@ -70,6 +73,10 @@ pub struct ExportCommand {
     /// Things to export
     #[clap(long, short = 't', value_enum)]
     target: ExportTarget,
+
+    /// basic authentication for connecting to the server
+    #[clap(long)]
+    auth_basic: Option<String>,
 }
 
 impl ExportCommand {
@@ -82,12 +89,22 @@ impl ExportCommand {
             addr: self.addr.clone(),
         })?;
         let (catalog, schema) = split_database(&self.database)?;
-        let database_client = Database::new(
+        let mut database_client = Database::new(
             catalog.clone(),
             schema.clone().unwrap_or(DEFAULT_SCHEMA_NAME.to_string()),
             client,
         );
 
+        if let Some(auth_basic) = &self.auth_basic {
+            let (username, password) = auth_basic.split_once(':').context(IllegalConfigSnafu {
+                msg: "auth_basic cannot be split by ':'".to_string(),
+            })?;
+            database_client.set_auth(AuthScheme::Basic(Basic {
+                username: username.to_string(),
+                password: password.to_string(),
+            }));
+        }
+
         Ok(Instance::Tool(Box::new(Export {
             client: database_client,
             catalog,
@@ -141,6 +158,9 @@ impl Export {
         let mut result = Vec::with_capacity(schemas.len());
         for i in 0..schemas.len() {
            let schema = schemas.get_data(i).unwrap().to_owned();
+            if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
+                continue;
+            }
            result.push((self.catalog.clone(), schema));
         }
         Ok(result)
@@ -326,25 +346,30 @@ impl Export {
 
         let copy_from_file =
             Path::new(&self.output_dir).join(format!("{catalog}-{schema}_copy_from.sql"));
-        let mut file = File::create(copy_from_file).await.context(FileIoSnafu)?;
+        let mut writer =
+            BufWriter::new(File::create(copy_from_file).await.context(FileIoSnafu)?);
 
-        let copy_from_sql = dir_filenames
-            .into_iter()
-            .map(|file| {
-                let file = file.unwrap();
-                let filename = file.file_name().into_string().unwrap();
-                format!(
-                    "copy {} from '{}' with (format='parquet');\n",
-                    filename.replace(".parquet", ""),
-                    file.path().to_str().unwrap()
-                )
-            })
-            .collect::<Vec<_>>()
-            .join("");
-        file.write_all(copy_from_sql.as_bytes())
-            .await
-            .context(FileIoSnafu)?;
+        for table_file in dir_filenames {
+            let table_file = table_file.unwrap();
+            let table_name = table_file
+                .file_name()
+                .into_string()
+                .unwrap()
+                .replace(".parquet", "");
+
+            writer
+                .write(
+                    format!(
+                        "copy {} from '{}' with (format='parquet');\n",
+                        table_name,
+                        table_file.path().to_str().unwrap()
+                    )
+                    .as_bytes(),
+                )
+                .await
+                .context(FileIoSnafu)?;
+        }
+        writer.flush().await.context(FileIoSnafu)?;
 
         info!("finished exporting {catalog}.{schema} copy_from.sql");
 
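The export change above replaces one large formatted SQL string with per-table writes through a tokio BufWriter; with a buffered writer the final flush is what guarantees the buffered bytes actually reach the file. A minimal sketch of that pattern under an assumed, illustrative file name:

use tokio::fs::File;
use tokio::io::{AsyncWriteExt, BufWriter};

async fn write_lines(lines: &[String]) -> std::io::Result<()> {
    let mut writer = BufWriter::new(File::create("copy_from_demo.sql").await?);
    for line in lines {
        // Each write lands in the in-memory buffer first.
        writer.write_all(line.as_bytes()).await?;
    }
    // Push any remaining buffered bytes down to the underlying file.
    writer.flush().await?;
    Ok(())
}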
@@ -17,7 +17,6 @@ use std::sync::Arc;
 use std::time::Instant;
 
 use catalog::kvbackend::{CachedMetaKvBackend, KvBackendCatalogManager};
-use client::client_manager::DatanodeClients;
 use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use common_base::Plugins;
 use common_error::ext::ErrorExt;
@@ -176,7 +175,7 @@ impl Repl {
             .encode(&plan)
             .context(SubstraitEncodeLogicalPlanSnafu)?;
 
-            self.database.logical_plan(plan.to_vec(), 0).await
+            self.database.logical_plan(plan.to_vec()).await
         } else {
             self.database.sql(&sql).await
         }
@@ -250,13 +249,8 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
 
     let cached_meta_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));
 
-    let datanode_clients = Arc::new(DatanodeClients::default());
-
-    let catalog_list = KvBackendCatalogManager::new(
-        cached_meta_backend.clone(),
-        cached_meta_backend.clone(),
-        datanode_clients,
-    );
+    let catalog_list =
+        KvBackendCatalogManager::new(cached_meta_backend.clone(), cached_meta_backend);
     let plugins: Plugins = Default::default();
     let state = Arc::new(QueryEngineState::new(
         catalog_list,
@@ -27,6 +27,8 @@ use common_meta::key::table_name::{TableNameKey, TableNameValue};
 use common_meta::key::table_region::{TableRegionKey, TableRegionValue};
 use common_meta::key::table_route::{TableRouteKey, TableRouteValue as NextTableRouteValue};
 use common_meta::key::{RegionDistribution, TableMetaKey};
+use common_meta::kv_backend::etcd::EtcdStore;
+use common_meta::kv_backend::KvBackendRef;
 use common_meta::range_stream::PaginationStream;
 use common_meta::rpc::router::TableRoute;
 use common_meta::rpc::store::{BatchDeleteRequest, BatchPutRequest, PutRequest, RangeRequest};
@@ -35,8 +37,6 @@ use common_meta::util::get_prefix_end_key;
 use common_telemetry::info;
 use etcd_client::Client;
 use futures::TryStreamExt;
-use meta_srv::service::store::etcd::EtcdStore;
-use meta_srv::service::store::kv::{KvBackendAdapter, KvStoreRef};
 use prost::Message;
 use snafu::ResultExt;
 use v1_helper::{CatalogKey as v1CatalogKey, SchemaKey as v1SchemaKey, TableGlobalValue};
@@ -81,7 +81,7 @@ impl UpgradeCommand {
 }
 
 struct MigrateTableMetadata {
-    etcd_store: KvStoreRef,
+    etcd_store: KvBackendRef,
     dryrun: bool,
 
     skip_table_global_keys: bool,
@@ -123,7 +123,7 @@ impl MigrateTableMetadata {
         info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
 
         let mut stream = PaginationStream::new(
-            KvBackendAdapter::wrap(self.etcd_store.clone()),
+            self.etcd_store.clone(),
             RangeRequest::new().with_range(key, range_end),
            PAGE_SIZE,
            Arc::new(|kv: KeyValue| {
@@ -182,7 +182,7 @@ impl MigrateTableMetadata {
         let mut keys = Vec::new();
         info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
         let mut stream = PaginationStream::new(
-            KvBackendAdapter::wrap(self.etcd_store.clone()),
+            self.etcd_store.clone(),
             RangeRequest::new().with_range(key, range_end),
            PAGE_SIZE,
            Arc::new(|kv: KeyValue| {
@@ -234,7 +234,7 @@ impl MigrateTableMetadata {
         let mut keys = Vec::new();
         info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
         let mut stream = PaginationStream::new(
-            KvBackendAdapter::wrap(self.etcd_store.clone()),
+            self.etcd_store.clone(),
             RangeRequest::new().with_range(key, range_end),
            PAGE_SIZE,
            Arc::new(|kv: KeyValue| {
@@ -284,7 +284,7 @@ impl MigrateTableMetadata {
 
         info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
         let mut stream = PaginationStream::new(
-            KvBackendAdapter::wrap(self.etcd_store.clone()),
+            self.etcd_store.clone(),
             RangeRequest::new().with_range(key, range_end.clone()),
            PAGE_SIZE,
            Arc::new(|kv: KeyValue| {
@@ -12,15 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use std::sync::Arc;
 use std::time::Duration;
 
+use catalog::kvbackend::MetaKvBackend;
 use clap::Parser;
 use common_telemetry::logging;
 use datanode::config::DatanodeOptions;
 use datanode::datanode::{Datanode, DatanodeBuilder};
 use meta_client::MetaClientOptions;
 use servers::Mode;
-use snafu::ResultExt;
+use snafu::{OptionExt, ResultExt};
 
 use crate::error::{MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
 use crate::options::{Options, TopLevelOptions};
@@ -89,7 +91,7 @@ struct StartCommand {
     rpc_addr: Option<String>,
     #[clap(long)]
     rpc_hostname: Option<String>,
-    #[clap(long, multiple = true, value_delimiter = ',')]
+    #[clap(long, value_delimiter = ',', num_args = 1..)]
     metasrv_addr: Option<Vec<String>>,
     #[clap(short, long)]
     config_file: Option<String>,
@@ -177,7 +179,27 @@ impl StartCommand {
         logging::info!("Datanode start command: {:#?}", self);
         logging::info!("Datanode options: {:#?}", opts);
 
-        let datanode = DatanodeBuilder::new(opts, None, plugins)
+        let node_id = opts
+            .node_id
+            .context(MissingConfigSnafu { msg: "'node_id'" })?;
+
+        let meta_config = opts.meta_client.as_ref().context(MissingConfigSnafu {
+            msg: "'meta_client_options'",
+        })?;
+
+        let meta_client = datanode::heartbeat::new_metasrv_client(node_id, meta_config)
+            .await
+            .context(StartDatanodeSnafu)?;
+
+        let meta_backend = Arc::new(MetaKvBackend {
+            client: Arc::new(meta_client.clone()),
+        });
+
+        let datanode = DatanodeBuilder::new(opts, plugins)
+            .with_meta_client(meta_client)
+            .with_kv_backend(meta_backend)
+            .enable_region_server_service()
+            .enable_http_service()
             .build()
             .await
             .context(StartDatanodeSnafu)?;
@@ -191,9 +213,8 @@ mod tests {
     use std::io::Write;
     use std::time::Duration;
 
-    use common_base::readable_size::ReadableSize;
     use common_test_util::temp_dir::create_named_temp_file;
-    use datanode::config::{CompactionConfig, FileConfig, ObjectStoreConfig, RegionManifestConfig};
+    use datanode::config::{FileConfig, ObjectStoreConfig};
     use servers::heartbeat_options::HeartbeatOptions;
     use servers::Mode;
 
@@ -233,16 +254,6 @@ mod tests {
             type = "File"
             data_home = "/tmp/greptimedb/"
 
-            [storage.compaction]
-            max_inflight_tasks = 3
-            max_files_in_level0 = 7
-            max_purge_tasks = 32
-
-            [storage.manifest]
-            checkpoint_margin = 9
-            gc_duration = '7s'
-            compress = true
-
             [logging]
             level = "debug"
             dir = "/tmp/greptimedb/test/logs"
@@ -295,24 +306,6 @@ mod tests {
             ObjectStoreConfig::File(FileConfig { .. })
         ));
 
-        assert_eq!(
-            CompactionConfig {
-                max_inflight_tasks: 3,
-                max_files_in_level0: 7,
-                max_purge_tasks: 32,
-                sst_write_buffer_size: ReadableSize::mb(8),
-            },
-            options.storage.compaction,
-        );
-        assert_eq!(
-            RegionManifestConfig {
-                checkpoint_margin: Some(9),
-                gc_duration: Some(Duration::from_secs(7)),
-                compress: true
-            },
-            options.storage.manifest,
-        );
-
         assert_eq!("debug", options.logging.level.unwrap());
         assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
     }
@@ -389,18 +382,12 @@ mod tests {
             file_size = "1GB"
             purge_threshold = "50GB"
             purge_interval = "10m"
-            read_batch_size = 128
             sync_write = false
 
             [storage]
             type = "File"
             data_home = "/tmp/greptimedb/"
 
-            [storage.compaction]
-            max_inflight_tasks = 3
-            max_files_in_level0 = 7
-            max_purge_tasks = 32
-
             [logging]
             level = "debug"
             dir = "/tmp/greptimedb/test/logs"
@@ -411,26 +398,24 @@ mod tests {
         temp_env::with_vars(
             [
                 (
-                    // storage.manifest.gc_duration = 9s
+                    // wal.purge_interval = 1m
                     [
                         env_prefix.to_string(),
-                        "storage".to_uppercase(),
-                        "manifest".to_uppercase(),
-                        "gc_duration".to_uppercase(),
+                        "wal".to_uppercase(),
+                        "purge_interval".to_uppercase(),
                     ]
                    .join(ENV_VAR_SEP),
-                    Some("9s"),
+                    Some("1m"),
                 ),
                 (
-                    // storage.compaction.max_purge_tasks = 99
+                    // wal.read_batch_size = 100
                     [
                         env_prefix.to_string(),
-                        "storage".to_uppercase(),
-                        "compaction".to_uppercase(),
-                        "max_purge_tasks".to_uppercase(),
+                        "wal".to_uppercase(),
+                        "read_batch_size".to_uppercase(),
                     ]
                    .join(ENV_VAR_SEP),
-                    Some("99"),
+                    Some("100"),
                 ),
                 (
                     // meta_client.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
@@ -458,10 +443,7 @@ mod tests {
                 };
 
                 // Should be read from env, env > default values.
-                assert_eq!(
-                    opts.storage.manifest.gc_duration,
-                    Some(Duration::from_secs(9))
-                );
+                assert_eq!(opts.wal.read_batch_size, 100,);
                 assert_eq!(
                     opts.meta_client.unwrap().metasrv_addrs,
                     vec![
@@ -472,19 +454,13 @@ mod tests {
                 );
 
                 // Should be read from config file, config file > env > default values.
-                assert_eq!(opts.storage.compaction.max_purge_tasks, 32);
+                assert_eq!(opts.wal.purge_interval, Duration::from_secs(60 * 10));
 
                 // Should be read from cli, cli > config file > env > default values.
                 assert_eq!(opts.wal.dir.unwrap(), "/other/wal/dir");
 
                 // Should be default value.
-                assert_eq!(
-                    opts.storage.manifest.checkpoint_margin,
-                    DatanodeOptions::default()
-                        .storage
-                        .manifest
-                        .checkpoint_margin
-                );
+                assert_eq!(opts.http.addr, DatanodeOptions::default().http.addr);
             },
         );
     }
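The datanode test changes above exercise layered configuration, where an environment variable built from the prefix and ENV_VAR_SEP (for example something like PREFIX__WAL__PURGE_INTERVAL) overrides a value from the config file. A small sketch of the temp_env pattern these tests rely on, with a hypothetical prefix and key rather than the real ones:

#[test]
fn env_override_sketch() {
    // Hypothetical variable name; the real tests assemble it from env_prefix and ENV_VAR_SEP.
    temp_env::with_vars([("DEMO_APP__WAL__PURGE_INTERVAL", Some("1m"))], || {
        // Inside the closure the variable is set; it is restored afterwards.
        let purge_interval = std::env::var("DEMO_APP__WAL__PURGE_INTERVAL").unwrap();
        assert_eq!(purge_interval, "1m");
        // A real test would feed this environment into the layered config loader instead.
    });
}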
@@ -37,6 +37,12 @@ pub enum Error {
         source: common_meta::error::Error,
     },
 
+    #[snafu(display("Failed to init DDL manager"))]
+    InitDdlManager {
+        location: Location,
+        source: common_meta::error::Error,
+    },
+
     #[snafu(display("Failed to start procedure manager"))]
     StartProcedureManager {
         location: Location,
@@ -240,9 +246,11 @@ impl ErrorExt for Error {
             Error::ShutdownMetaServer { source, .. } => source.status_code(),
             Error::BuildMetaServer { source, .. } => source.status_code(),
             Error::UnsupportedSelectorType { source, .. } => source.status_code(),
-            Error::IterStream { source, .. } | Error::InitMetadata { source, .. } => {
-                source.status_code()
-            }
+
+            Error::IterStream { source, .. }
+            | Error::InitMetadata { source, .. }
+            | Error::InitDdlManager { source, .. } => source.status_code(),
+
             Error::ConnectServer { source, .. } => source.status_code(),
             Error::MissingConfig { .. }
             | Error::LoadLayeredConfig { .. }
@@ -253,6 +261,7 @@ impl ErrorExt for Error {
             | Error::CreateDir { .. }
             | Error::EmptyResult { .. }
             | Error::InvalidDatabaseName { .. } => StatusCode::InvalidArguments,
+
             Error::StartProcedureManager { source, .. }
             | Error::StopProcedureManager { source, .. } => source.status_code(),
             Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
@@ -12,18 +12,26 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use std::sync::Arc;
 use std::time::Duration;
 
+use catalog::kvbackend::CachedMetaKvBackend;
 use clap::Parser;
+use client::client_manager::DatanodeClients;
+use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
+use common_meta::heartbeat::handler::HandlerGroupExecutor;
 use common_telemetry::logging;
 use frontend::frontend::FrontendOptions;
+use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
+use frontend::heartbeat::HeartbeatTask;
+use frontend::instance::builder::FrontendBuilder;
 use frontend::instance::{FrontendInstance, Instance as FeInstance};
 use meta_client::MetaClientOptions;
 use servers::tls::{TlsMode, TlsOption};
 use servers::Mode;
-use snafu::ResultExt;
+use snafu::{OptionExt, ResultExt};
 
-use crate::error::{self, Result, StartFrontendSnafu};
+use crate::error::{self, MissingConfigSnafu, Result, StartFrontendSnafu};
 use crate::options::{Options, TopLevelOptions};
 
 pub struct Instance {
@@ -100,7 +108,7 @@ pub struct StartCommand {
     config_file: Option<String>,
     #[clap(short, long)]
     influxdb_enable: Option<bool>,
-    #[clap(long, multiple = true, value_delimiter = ',')]
+    #[clap(long, value_delimiter = ',', num_args = 1..)]
     metasrv_addr: Option<Vec<String>>,
     #[clap(long)]
     tls_mode: Option<TlsMode>,
@@ -188,6 +196,7 @@ impl StartCommand {
     }
 
     async fn build(self, mut opts: FrontendOptions) -> Result<Instance> {
+        #[allow(clippy::unnecessary_mut_passed)]
         let plugins = plugins::setup_frontend_plugins(&mut opts)
             .await
             .context(StartFrontendSnafu)?;
@@ -195,12 +204,40 @@ impl StartCommand {
         logging::info!("Frontend start command: {:#?}", self);
         logging::info!("Frontend options: {:#?}", opts);
 
-        let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
+        let meta_client_options = opts.meta_client.as_ref().context(MissingConfigSnafu {
+            msg: "'meta_client'",
+        })?;
+        let meta_client = FeInstance::create_meta_client(meta_client_options)
             .await
             .context(StartFrontendSnafu)?;
 
+        let meta_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));
+
+        let executor = HandlerGroupExecutor::new(vec![
+            Arc::new(ParseMailboxMessageHandler),
+            Arc::new(InvalidateTableCacheHandler::new(meta_backend.clone())),
+        ]);
+
+        let heartbeat_task = HeartbeatTask::new(
+            meta_client.clone(),
+            opts.heartbeat.clone(),
+            Arc::new(executor),
+        );
+
+        let mut instance = FrontendBuilder::new(
+            meta_backend.clone(),
+            Arc::new(DatanodeClients::default()),
+            meta_client,
+        )
+        .with_cache_invalidator(meta_backend)
+        .with_plugin(plugins)
+        .with_heartbeat_task(heartbeat_task)
+        .try_build()
+        .await
+        .context(StartFrontendSnafu)?;
+
         instance
-            .build_servers(&opts)
+            .build_servers(opts)
             .await
             .context(StartFrontendSnafu)?;
 
@@ -312,6 +349,7 @@ mod tests {
             ..Default::default()
         };
 
+        #[allow(clippy::unnecessary_mut_passed)]
         let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();
 
         let provider = plugins.get::<UserProviderRef>().unwrap();
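Both CLI structs above replace clap 3's multiple = true with clap 4's num_args = 1.. plus a value delimiter, which is the clap 4 way to accept either a comma-separated list or repeated flags. A small sketch of how such an option parses; the struct and flag names are illustrative, not taken from the code base:

use clap::Parser;

#[derive(Parser, Debug)]
struct DemoArgs {
    /// Accepts `--addrs a,b,c` as well as `--addrs a --addrs b`.
    #[clap(long, value_delimiter = ',', num_args = 1..)]
    addrs: Option<Vec<String>>,
}

fn main() {
    let args = DemoArgs::parse_from(["demo", "--addrs", "127.0.0.1:3001,127.0.0.1:3002"]);
    // The single comma-separated argument is split into two values.
    assert_eq!(args.addrs.unwrap().len(), 2);
}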
@@ -100,6 +100,9 @@ struct StartCommand {
     http_timeout: Option<u64>,
     #[clap(long, default_value = "GREPTIMEDB_METASRV")]
     env_prefix: String,
+    /// The working home directory of this metasrv instance.
+    #[clap(long)]
+    data_home: Option<String>,
 }
 
 impl StartCommand {
@@ -152,6 +155,10 @@ impl StartCommand {
             opts.http.timeout = Duration::from_secs(http_timeout);
         }
 
+        if let Some(data_home) = &self.data_home {
+            opts.data_home = data_home.clone();
+        }
+
         // Disable dashboard in metasrv.
         opts.http.disable_dashboard = true;
 
@@ -166,7 +173,12 @@ impl StartCommand {
         logging::info!("MetaSrv start command: {:#?}", self);
         logging::info!("MetaSrv options: {:#?}", opts);
 
-        let instance = MetaSrvInstance::new(opts, plugins)
+        let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins.clone(), None)
+            .await
+            .context(error::BuildMetaServerSnafu)?;
+        let metasrv = builder.build().await.context(error::BuildMetaServerSnafu)?;
+
+        let instance = MetaSrvInstance::new(opts, plugins, metasrv)
             .await
             .context(error::BuildMetaServerSnafu)?;
 
@@ -216,6 +228,12 @@ mod tests {
             [logging]
             level = "debug"
             dir = "/tmp/greptimedb/test/logs"
+
+            [failure_detector]
+            threshold = 8.0
+            min_std_deviation = "100ms"
+            acceptable_heartbeat_pause = "3000ms"
+            first_heartbeat_estimate = "1000ms"
         "#;
         write!(file, "{}", toml_str).unwrap();
 
@@ -234,6 +252,25 @@ mod tests {
         assert_eq!(SelectorType::LeaseBased, options.selector);
         assert_eq!("debug", options.logging.level.as_ref().unwrap());
         assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
+        assert_eq!(8.0, options.failure_detector.threshold);
+        assert_eq!(
+            100.0,
+            options.failure_detector.min_std_deviation.as_millis() as f32
+        );
+        assert_eq!(
+            3000,
+            options
+                .failure_detector
+                .acceptable_heartbeat_pause
+                .as_millis()
+        );
+        assert_eq!(
+            1000,
+            options
+                .failure_detector
+                .first_heartbeat_estimate
+                .as_millis()
+        );
     }
 
     #[test]
@@ -12,11 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use common_config::KvStoreConfig;
+use common_config::KvBackendConfig;
 use common_telemetry::logging::LoggingOptions;
 use config::{Config, Environment, File, FileFormat};
 use datanode::config::{DatanodeOptions, ProcedureConfig};
-use frontend::frontend::FrontendOptions;
+use frontend::error::{Result as FeResult, TomlFormatSnafu};
+use frontend::frontend::{FrontendOptions, TomlSerializable};
 use meta_srv::metasrv::MetaSrvOptions;
 use serde::{Deserialize, Serialize};
 use snafu::ResultExt;
@@ -27,15 +28,28 @@ pub const ENV_VAR_SEP: &str = "__";
 pub const ENV_LIST_SEP: &str = ",";
 
 /// Options mixed up from datanode, frontend and metasrv.
+#[derive(Serialize, Debug)]
 pub struct MixOptions {
     pub data_home: String,
     pub procedure: ProcedureConfig,
-    pub metadata_store: KvStoreConfig,
+    pub metadata_store: KvBackendConfig,
     pub frontend: FrontendOptions,
     pub datanode: DatanodeOptions,
     pub logging: LoggingOptions,
 }
 
+impl From<MixOptions> for FrontendOptions {
+    fn from(value: MixOptions) -> Self {
+        value.frontend
+    }
+}
+
+impl TomlSerializable for MixOptions {
+    fn to_toml(&self) -> FeResult<String> {
+        toml::to_string(self).context(TomlFormatSnafu)
+    }
+}
+
 pub enum Options {
     Datanode(Box<DatanodeOptions>),
     Frontend(Box<FrontendOptions>),
@@ -119,12 +133,20 @@ impl Options {
 
         Ok(opts)
     }
+
+    pub fn node_id(&self) -> Option<String> {
+        match self {
+            Options::Metasrv(_) | Options::Cli(_) => None,
+            Options::Datanode(opt) => opt.node_id.map(|x| x.to_string()),
+            Options::Frontend(opt) => opt.node_id.clone(),
+            Options::Standalone(opt) => opt.frontend.node_id.clone(),
+        }
+    }
 }
 
 #[cfg(test)]
 mod tests {
     use std::io::Write;
-    use std::time::Duration;
 
     use common_test_util::temp_dir::create_named_temp_file;
     use datanode::config::{DatanodeOptions, ObjectStoreConfig};
@@ -156,11 +178,6 @@ mod tests {
             read_batch_size = 128
             sync_write = false
 
-            [storage.compaction]
-            max_inflight_tasks = 3
-            max_files_in_level0 = 7
-            max_purge_tasks = 32
-
             [logging]
             level = "debug"
             dir = "/tmp/greptimedb/test/logs"
@@ -171,17 +188,6 @@ mod tests {
         temp_env::with_vars(
            // The following environment variables will be used to override the values in the config file.
             [
-                (
-                    // storage.manifest.checkpoint_margin = 99
-                    [
-                        env_prefix.to_string(),
-                        "storage".to_uppercase(),
-                        "manifest".to_uppercase(),
-                        "checkpoint_margin".to_uppercase(),
-                    ]
-                    .join(ENV_VAR_SEP),
-                    Some("99"),
-                ),
                 (
                     // storage.type = S3
                     [
@@ -202,17 +208,6 @@ mod tests {
                    .join(ENV_VAR_SEP),
                     Some("mybucket"),
                 ),
-                (
-                    // storage.manifest.gc_duration = 42s
-                    [
-                        env_prefix.to_string(),
-                        "storage".to_uppercase(),
-                        "manifest".to_uppercase(),
-                        "gc_duration".to_uppercase(),
-                    ]
-                    .join(ENV_VAR_SEP),
-                    Some("42s"),
-                ),
                 (
                     // wal.dir = /other/wal/dir
                     [
@@ -243,17 +238,12 @@ mod tests {
         .unwrap();
 
         // Check the configs from environment variables.
-        assert_eq!(opts.storage.manifest.checkpoint_margin, Some(99));
         match opts.storage.store {
             ObjectStoreConfig::S3(s3_config) => {
                 assert_eq!(s3_config.bucket, "mybucket".to_string());
             }
             _ => panic!("unexpected store type"),
         }
-        assert_eq!(
-            opts.storage.manifest.gc_duration,
-            Some(Duration::from_secs(42))
-        );
         assert_eq!(
             opts.meta_client.unwrap().metasrv_addrs,
             vec![
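MixOptions now derives Serialize so it can be rendered back to TOML through the TomlSerializable impl above, which is a thin wrapper around toml::to_string. A hedged sketch of that underlying behaviour on a serde-derived struct; the type and field names here are illustrative:

use serde::Serialize;

#[derive(Serialize)]
struct DemoOptions {
    data_home: String,
    purge_interval: String,
}

fn main() {
    let opts = DemoOptions {
        data_home: "/tmp/demo".to_string(),
        purge_interval: "10m".to_string(),
    };
    // Serializes the struct into a TOML document, e.g. `data_home = "/tmp/demo"`.
    let rendered = toml::to_string(&opts).unwrap();
    assert!(rendered.contains("data_home"));
}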
@@ -15,21 +15,23 @@
 use std::sync::Arc;
 use std::{fs, path};

-use catalog::kvbackend::KvBackendCatalogManager;
-use catalog::CatalogManagerRef;
 use clap::Parser;
-use common_base::Plugins;
-use common_config::{metadata_store_dir, KvStoreConfig, WalConfig};
-use common_meta::cache_invalidator::DummyKvCacheInvalidator;
+use common_config::{metadata_store_dir, KvBackendConfig, WalConfig};
+use common_meta::cache_invalidator::DummyCacheInvalidator;
+use common_meta::datanode_manager::DatanodeManagerRef;
+use common_meta::ddl::DdlTaskExecutorRef;
+use common_meta::ddl_manager::DdlManager;
+use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
 use common_meta::kv_backend::KvBackendRef;
 use common_procedure::ProcedureManagerRef;
 use common_telemetry::info;
 use common_telemetry::logging::LoggingOptions;
 use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
 use datanode::datanode::{Datanode, DatanodeBuilder};
-use datanode::region_server::RegionServer;
 use file_engine::config::EngineConfig as FileEngineConfig;
 use frontend::frontend::FrontendOptions;
+use frontend::instance::builder::FrontendBuilder;
+use frontend::instance::standalone::StandaloneTableMetadataCreator;
 use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
 use frontend::service_config::{
     GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
@@ -42,9 +44,9 @@ use servers::Mode;
 use snafu::ResultExt;

 use crate::error::{
-    CreateDirSnafu, IllegalConfigSnafu, InitMetadataSnafu, Result, ShutdownDatanodeSnafu,
-    ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
-    StopProcedureManagerSnafu,
+    CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu, InitMetadataSnafu, Result,
+    ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
+    StartProcedureManagerSnafu, StopProcedureManagerSnafu,
 };
 use crate::options::{MixOptions, Options, TopLevelOptions};

@@ -97,7 +99,7 @@ pub struct StandaloneOptions {
     pub prom_store: PromStoreOptions,
     pub wal: WalConfig,
     pub storage: StorageConfig,
-    pub metadata_store: KvStoreConfig,
+    pub metadata_store: KvBackendConfig,
     pub procedure: ProcedureConfig,
     pub logging: LoggingOptions,
     pub user_provider: Option<String>,
@@ -119,7 +121,7 @@ impl Default for StandaloneOptions {
             prom_store: PromStoreOptions::default(),
             wal: WalConfig::default(),
             storage: StorageConfig::default(),
-            metadata_store: KvStoreConfig::default(),
+            metadata_store: KvBackendConfig::default(),
             procedure: ProcedureConfig::default(),
             logging: LoggingOptions::default(),
             user_provider: None,
@@ -156,6 +158,7 @@ impl StandaloneOptions {
             wal: self.wal,
             storage: self.storage,
             region_engine: self.region_engine,
+            rpc_addr: self.grpc.addr,
             ..Default::default()
         }
     }
@@ -169,9 +172,7 @@ pub struct Instance {

 impl Instance {
     pub async fn start(&mut self) -> Result<()> {
-        // Start datanode instance before starting services, to avoid requests come in before internal components are started.
-        self.datanode.start().await.context(StartDatanodeSnafu)?;
-        info!("Datanode instance started");
+        self.datanode.start_telemetry();

         self.procedure_manager
             .start()
@@ -229,6 +230,9 @@ struct StartCommand {
     user_provider: Option<String>,
     #[clap(long, default_value = "GREPTIMEDB_STANDALONE")]
     env_prefix: String,
+    /// The working home directory of this standalone instance.
+    #[clap(long)]
+    data_home: Option<String>,
 }

 impl StartCommand {
@@ -259,6 +263,10 @@ impl StartCommand {
             opts.http.addr = addr.clone()
         }

+        if let Some(data_home) = &self.data_home {
+            opts.storage.data_home = data_home.clone();
+        }
+
         if let Some(addr) = &self.rpc_addr {
             // frontend grpc addr conflict with datanode default grpc addr
             let datanode_grpc_addr = DatanodeOptions::default().rpc_addr;
@@ -316,18 +324,17 @@ impl StartCommand {
     #[allow(unused_variables)]
     #[allow(clippy::diverging_sub_expression)]
     async fn build(self, opts: MixOptions) -> Result<Instance> {
-        let mut fe_opts = opts.frontend;
-        let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts)
+        let mut fe_opts = opts.frontend.clone();
+        #[allow(clippy::unnecessary_mut_passed)]
+        let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts) // mut ref is MUST, DO NOT change it
             .await
             .context(StartFrontendSnafu)?;

-        let dn_opts = opts.datanode;
+        let dn_opts = opts.datanode.clone();

         info!("Standalone start command: {:#?}", self);
-        info!(
-            "Standalone frontend options: {:#?}, datanode options: {:#?}",
-            fe_opts, dn_opts
-        );
+        info!("Building standalone instance with {opts:#?}");

         // Ensure the data_home directory exists.
         fs::create_dir_all(path::Path::new(&opts.data_home)).context(CreateDirSnafu {
@@ -335,45 +342,35 @@ impl StartCommand {
         })?;

         let metadata_dir = metadata_store_dir(&opts.data_home);
-        let (kv_store, procedure_manager) = FeInstance::try_build_standalone_components(
+        let (kv_backend, procedure_manager) = FeInstance::try_build_standalone_components(
             metadata_dir,
-            opts.metadata_store,
-            opts.procedure,
+            opts.metadata_store.clone(),
+            opts.procedure.clone(),
         )
         .await
         .context(StartFrontendSnafu)?;

-        let datanode =
-            DatanodeBuilder::new(dn_opts.clone(), Some(kv_store.clone()), Default::default())
-                .build()
-                .await
-                .context(StartDatanodeSnafu)?;
-        let region_server = datanode.region_server();
+        let builder =
+            DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
+        let datanode = builder.build().await.context(StartDatanodeSnafu)?;

-        let catalog_manager = KvBackendCatalogManager::new(
-            kv_store.clone(),
-            Arc::new(DummyKvCacheInvalidator),
-            Arc::new(StandaloneDatanodeManager(region_server.clone())),
-        );
+        let datanode_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));

-        catalog_manager
-            .table_metadata_manager_ref()
-            .init()
-            .await
-            .context(InitMetadataSnafu)?;
-
-        // TODO: build frontend instance like in distributed mode
-        let mut frontend = build_frontend(
-            fe_plugins,
-            kv_store,
+        let ddl_task_executor = Self::create_ddl_task_executor(
+            kv_backend.clone(),
             procedure_manager.clone(),
-            catalog_manager,
-            region_server,
+            datanode_manager.clone(),
         )
         .await?;

+        let mut frontend = FrontendBuilder::new(kv_backend, datanode_manager, ddl_task_executor)
+            .with_plugin(fe_plugins)
+            .try_build()
+            .await
+            .context(StartFrontendSnafu)?;
+
         frontend
-            .build_servers(&fe_opts)
+            .build_servers(opts)
             .await
             .context(StartFrontendSnafu)?;

@@ -383,26 +380,41 @@ impl StartCommand {
             procedure_manager,
         })
     }
-}

-/// Build frontend instance in standalone mode
-async fn build_frontend(
-    plugins: Plugins,
-    kv_store: KvBackendRef,
-    procedure_manager: ProcedureManagerRef,
-    catalog_manager: CatalogManagerRef,
-    region_server: RegionServer,
-) -> Result<FeInstance> {
-    let frontend_instance = FeInstance::try_new_standalone(
-        kv_store,
-        procedure_manager,
-        catalog_manager,
-        plugins,
-        region_server,
-    )
-    .await
-    .context(StartFrontendSnafu)?;
-    Ok(frontend_instance)
+    async fn create_ddl_task_executor(
+        kv_backend: KvBackendRef,
+        procedure_manager: ProcedureManagerRef,
+        datanode_manager: DatanodeManagerRef,
+    ) -> Result<DdlTaskExecutorRef> {
+        let table_metadata_manager =
+            Self::create_table_metadata_manager(kv_backend.clone()).await?;
+
+        let ddl_task_executor: DdlTaskExecutorRef = Arc::new(
+            DdlManager::try_new(
+                procedure_manager,
+                datanode_manager,
+                Arc::new(DummyCacheInvalidator),
+                table_metadata_manager,
+                Arc::new(StandaloneTableMetadataCreator::new(kv_backend)),
+            )
+            .context(InitDdlManagerSnafu)?,
+        );
+
+        Ok(ddl_task_executor)
+    }
+
+    async fn create_table_metadata_manager(
+        kv_backend: KvBackendRef,
+    ) -> Result<TableMetadataManagerRef> {
+        let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend));
+
+        table_metadata_manager
+            .init()
+            .await
+            .context(InitMetadataSnafu)?;
+
+        Ok(table_metadata_manager)
+    }
 }

 #[cfg(test)]
@@ -426,6 +438,7 @@ mod tests {
             ..Default::default()
         };

+        #[allow(clippy::unnecessary_mut_passed)]
         let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();

         let provider = plugins.get::<UserProviderRef>().unwrap();
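For orientation, a condensed sketch of the rewired standalone startup shown in the hunks above. Every type and constructor it names comes from this file's imports; the wrapper function itself is only illustrative and omits option handling and server startup.

// Illustrative sketch only; not part of the diff.
async fn wire_standalone(
    kv_backend: KvBackendRef,
    procedure_manager: ProcedureManagerRef,
    datanode: Datanode,
    fe_plugins: Plugins,
) -> Result<FeInstance> {
    // In standalone mode the local region server doubles as the datanode manager.
    let datanode_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));

    // DDL tasks now run through a DdlManager instead of the catalog manager.
    let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));
    table_metadata_manager.init().await.context(InitMetadataSnafu)?;
    let ddl_task_executor: DdlTaskExecutorRef = Arc::new(
        DdlManager::try_new(
            procedure_manager,
            datanode_manager.clone(),
            Arc::new(DummyCacheInvalidator),
            table_metadata_manager,
            Arc::new(StandaloneTableMetadataCreator::new(kv_backend.clone())),
        )
        .context(InitDdlManagerSnafu)?,
    );

    // The frontend is assembled via a builder instead of FeInstance::try_new_standalone.
    FrontendBuilder::new(kv_backend, datanode_manager, ddl_task_executor)
        .with_plugin(fe_plugins)
        .try_build()
        .await
        .context(StartFrontendSnafu)
}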
@@ -8,8 +8,8 @@ license.workspace = true
 anymap = "1.0.0-beta.2"
 bitvec = "1.0"
 bytes = { version = "1.1", features = ["serde"] }
-common-error = { workspace = true }
-common-macro = { workspace = true }
+common-error.workspace = true
+common-macro.workspace = true
 paste = "1.0"
 serde = { version = "1.0", features = ["derive"] }
 snafu.workspace = true
@@ -5,11 +5,9 @@ edition.workspace = true
 license.workspace = true

 [dependencies]
-common-error = { workspace = true }
-common-macro = { workspace = true }
-serde.workspace = true
-serde_json = "1.0"
-snafu = { version = "0.7", features = ["backtraces"] }
+common-error.workspace = true
+common-macro.workspace = true
+snafu.workspace = true

 [dev-dependencies]
 chrono.workspace = true
@@ -54,14 +54,14 @@ pub fn metadata_store_dir(store_dir: &str) -> String {

 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
 #[serde(default)]
-pub struct KvStoreConfig {
+pub struct KvBackendConfig {
     // Kv file size in bytes
     pub file_size: ReadableSize,
     // Kv purge threshold in bytes
     pub purge_threshold: ReadableSize,
 }

-impl Default for KvStoreConfig {
+impl Default for KvBackendConfig {
     fn default() -> Self {
         Self {
             // log file size 256MB
@@ -17,15 +17,17 @@ async-compression = { version = "0.3", features = [
 ] }
 async-trait.workspace = true
 bytes = "1.1"
-common-error = { workspace = true }
-common-macro = { workspace = true }
-common-runtime = { workspace = true }
+common-error.workspace = true
+common-macro.workspace = true
+common-runtime.workspace = true
 datafusion.workspace = true
+datatypes.workspace = true
 derive_builder.workspace = true
 futures.workspace = true
 lazy_static.workspace = true
-object-store = { workspace = true }
+object-store.workspace = true
 orc-rust = "0.2"
+parquet.workspace = true
 paste = "1.0"
 regex = "1.7"
 serde.workspace = true
@@ -36,4 +38,4 @@ tokio.workspace = true
 url = "2.3"

 [dev-dependencies]
-common-test-util = { workspace = true }
+common-test-util.workspace = true
@@ -26,7 +26,9 @@ use tokio::io::{AsyncRead, AsyncWriteExt, BufReader};
 use tokio_util::io::{ReaderStream, StreamReader};

 use crate::error::{self, Error, Result};

 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EnumIter, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
 pub enum CompressionType {
     /// Gzip-ed file
     Gzip,
@@ -166,6 +166,14 @@ pub enum Error {

     #[snafu(display("Buffered writer closed"))]
     BufferedWriterClosed { location: Location },
+
+    #[snafu(display("Failed to write parquet file, path: {}", path))]
+    WriteParquet {
+        path: String,
+        location: Location,
+        #[snafu(source)]
+        error: parquet::errors::ParquetError,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -178,7 +186,8 @@ impl ErrorExt for Error {
             | ListObjects { .. }
             | ReadObject { .. }
             | WriteObject { .. }
-            | AsyncWrite { .. } => StatusCode::StorageUnavailable,
+            | AsyncWrite { .. }
+            | WriteParquet { .. } => StatusCode::StorageUnavailable,

             UnsupportedBackendProtocol { .. }
             | UnsupportedCompressionType { .. }
@@ -231,6 +240,7 @@ impl ErrorExt for Error {
             InvalidConnection { location, .. } => Some(*location),
             UnsupportedCompressionType { location, .. } => Some(*location),
             UnsupportedFormat { location, .. } => Some(*location),
+            WriteParquet { location, .. } => Some(*location),
         }
     }
 }
@@ -12,11 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use std::future::Future;
+use std::pin::Pin;
 use std::result;
 use std::sync::Arc;

 use arrow::record_batch::RecordBatch;
-use arrow_schema::Schema;
+use arrow_schema::{Schema, SchemaRef};
 use async_trait::async_trait;
 use datafusion::datasource::physical_plan::{FileMeta, ParquetFileReaderFactory};
 use datafusion::error::Result as DatafusionResult;
@@ -26,11 +28,15 @@ use datafusion::parquet::errors::{ParquetError, Result as ParquetResult};
 use datafusion::parquet::file::metadata::ParquetMetaData;
 use datafusion::parquet::format::FileMetaData;
 use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
+use datafusion::physical_plan::SendableRecordBatchStream;
 use futures::future::BoxFuture;
+use futures::StreamExt;
 use object_store::{ObjectStore, Reader};
+use parquet::basic::{Compression, ZstdLevel};
+use parquet::file::properties::WriterProperties;
 use snafu::ResultExt;

-use crate::buffered_writer::{ArrowWriterCloser, DfRecordBatchEncoder};
+use crate::buffered_writer::{ArrowWriterCloser, DfRecordBatchEncoder, LazyBufferedWriter};
 use crate::error::{self, Result};
 use crate::file_format::FileFormat;
 use crate::share_buffer::SharedBuffer;
@@ -156,6 +162,103 @@ impl ArrowWriterCloser for ArrowWriter<SharedBuffer> {
     }
 }

+/// Parquet writer that buffers row groups in memory and writes buffered data to an underlying
+/// storage by chunks to reduce memory consumption.
+pub struct BufferedWriter {
+    inner: InnerBufferedWriter,
+}
+
+type InnerBufferedWriter = LazyBufferedWriter<
+    object_store::Writer,
+    ArrowWriter<SharedBuffer>,
+    Box<
+        dyn FnMut(
+                String,
+            )
+                -> Pin<Box<dyn Future<Output = error::Result<object_store::Writer>> + Send>>
+            + Send,
+    >,
+>;
+
+impl BufferedWriter {
+    pub async fn try_new(
+        path: String,
+        store: ObjectStore,
+        arrow_schema: SchemaRef,
+        props: Option<WriterProperties>,
+        buffer_threshold: usize,
+    ) -> error::Result<Self> {
+        let buffer = SharedBuffer::with_capacity(buffer_threshold);
+
+        let arrow_writer = ArrowWriter::try_new(buffer.clone(), arrow_schema.clone(), props)
+            .context(error::WriteParquetSnafu { path: &path })?;
+
+        Ok(Self {
+            inner: LazyBufferedWriter::new(
+                buffer_threshold,
+                buffer,
+                arrow_writer,
+                &path,
+                Box::new(move |path| {
+                    let store = store.clone();
+                    Box::pin(async move {
+                        store
+                            .writer(&path)
+                            .await
+                            .context(error::WriteObjectSnafu { path })
+                    })
+                }),
+            ),
+        })
+    }
+
+    /// Write a record batch to stream writer.
+    pub async fn write(&mut self, arrow_batch: &RecordBatch) -> error::Result<()> {
+        self.inner.write(arrow_batch).await?;
+        self.inner.try_flush(false).await?;
+
+        Ok(())
+    }
+
+    /// Close parquet writer.
+    ///
+    /// Return file metadata and bytes written.
+    pub async fn close(self) -> error::Result<(FileMetaData, u64)> {
+        self.inner.close_with_arrow_writer().await
+    }
+}
+
+/// Output the stream to a parquet file.
+///
+/// Returns number of rows written.
+pub async fn stream_to_parquet(
+    mut stream: SendableRecordBatchStream,
+    store: ObjectStore,
+    path: &str,
+    threshold: usize,
+) -> Result<usize> {
+    let write_props = WriterProperties::builder()
+        .set_compression(Compression::ZSTD(ZstdLevel::default()))
+        .build();
+    let schema = stream.schema();
+    let mut buffered_writer = BufferedWriter::try_new(
+        path.to_string(),
+        store,
+        schema,
+        Some(write_props),
+        threshold,
+    )
+    .await?;
+    let mut rows_written = 0;
+    while let Some(batch) = stream.next().await {
+        let batch = batch.context(error::ReadRecordBatchSnafu)?;
+        buffered_writer.write(&batch).await?;
+        rows_written += batch.num_rows();
+    }
+    buffered_writer.close().await?;
+    Ok(rows_written)
+}
+
 #[cfg(test)]
 mod tests {
     use common_test_util::find_workspace_path;
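A hedged usage sketch of the new stream_to_parquet helper declared above. Only its signature is taken from the hunk; the module path and the threshold value are assumptions for illustration.

// Illustrative only; the exact re-export path of stream_to_parquet is assumed.
use common_datasource::error::Result;
use common_datasource::file_format::parquet::stream_to_parquet;
use datafusion::physical_plan::SendableRecordBatchStream;
use object_store::ObjectStore;

async fn export(stream: SendableRecordBatchStream, store: ObjectStore) -> Result<usize> {
    // Buffered row groups are flushed to the object store roughly every 8 MiB
    // (the threshold is in bytes); the return value is the number of rows written.
    stream_to_parquet(stream, store, "exports/table.parquet", 8 * 1024 * 1024).await
}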
@@ -4,7 +4,7 @@
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
src/common/decimal/Cargo.toml (new file, 15 lines, all added)
@@ -0,0 +1,15 @@
[package]
name = "common-decimal"
version.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
arrow.workspace = true
bigdecimal.workspace = true
common-error.workspace = true
common-macro.workspace = true
rust_decimal.workspace = true
serde.workspace = true
serde_json = "1.0"
snafu.workspace = true
src/common/decimal/src/decimal128.rs (new file, 432 lines, all added)
@@ -0,0 +1,432 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Display;
use std::hash::Hash;
use std::str::FromStr;

use bigdecimal::{BigDecimal, ToPrimitive};
use rust_decimal::Decimal as RustDecimal;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;

use crate::error::{
    self, BigDecimalOutOfRangeSnafu, Error, InvalidPrecisionOrScaleSnafu, ParseBigDecimalStrSnafu,
    ParseRustDecimalStrSnafu,
};

/// The maximum precision for [Decimal128] values
pub const DECIMAL128_MAX_PRECISION: u8 = 38;

/// The maximum scale for [Decimal128] values
pub const DECIMAL128_MAX_SCALE: i8 = 38;

/// The default scale for [Decimal128] values
pub const DECIMAL128_DEFAULT_SCALE: i8 = 10;

/// The maximum bytes length that an accurate RustDecimal can represent
const BYTES_TO_OVERFLOW_RUST_DECIMAL: usize = 28;

/// 128bit decimal, using the i128 to represent the decimal.
///
/// **precision**: the total number of digits in the number, it's range is \[1, 38\].
///
/// **scale**: the number of digits to the right of the decimal point, it's range is \[0, precision\].
#[derive(Debug, Eq, Copy, Clone, Serialize, Deserialize)]
pub struct Decimal128 {
    value: i128,
    precision: u8,
    scale: i8,
}

impl Decimal128 {
    /// Create a new Decimal128 from i128, precision and scale without any validation.
    pub fn new(value: i128, precision: u8, scale: i8) -> Self {
        // debug assert precision and scale is valid
        debug_assert!(
            precision > 0 && precision <= DECIMAL128_MAX_PRECISION,
            "precision should be in [1, {}]",
            DECIMAL128_MAX_PRECISION
        );
        debug_assert!(
            scale >= 0 && scale <= precision as i8,
            "scale should be in [0, precision]"
        );
        Self {
            value,
            precision,
            scale,
        }
    }

    /// Try new Decimal128 from i128, precision and scale with validation.
    pub fn try_new(value: i128, precision: u8, scale: i8) -> error::Result<Self> {
        // make sure the precision and scale is valid.
        valid_precision_and_scale(precision, scale)?;
        Ok(Self {
            value,
            precision,
            scale,
        })
    }

    /// Return underlying value without precision and scale
    pub fn val(&self) -> i128 {
        self.value
    }

    /// Returns the precision of this decimal.
    pub fn precision(&self) -> u8 {
        self.precision
    }

    /// Returns the scale of this decimal.
    pub fn scale(&self) -> i8 {
        self.scale
    }

    /// Convert to ScalarValue(value,precision,scale)
    pub fn to_scalar_value(&self) -> (Option<i128>, u8, i8) {
        (Some(self.value), self.precision, self.scale)
    }

    /// split the self.value(i128) to (high-64 bit, low-64 bit), and
    /// the precision, scale information is discarded.
    ///
    /// Return: (high-64 bit, low-64 bit)
    pub fn split_value(&self) -> (i64, i64) {
        ((self.value >> 64) as i64, self.value as i64)
    }

    /// Convert from precision, scale, a i128 value which
    /// represents by two i64 value(high-64 bit, low-64 bit).
    pub fn from_value_precision_scale(hi: i64, lo: i64, precision: u8, scale: i8) -> Self {
        let value = (hi as i128) << 64 | lo as i128;
        Self::new(value, precision, scale)
    }
}

/// The default value of Decimal128 is 0, and its precision is 1 and scale is 0.
impl Default for Decimal128 {
    fn default() -> Self {
        Self {
            value: 0,
            precision: 1,
            scale: 0,
        }
    }
}

impl PartialEq for Decimal128 {
    fn eq(&self, other: &Self) -> bool {
        self.precision.eq(&other.precision)
            && self.scale.eq(&other.scale)
            && self.value.eq(&other.value)
    }
}

// Two decimal values can be compared if they have the same precision and scale.
impl PartialOrd for Decimal128 {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        if self.precision == other.precision && self.scale == other.scale {
            return self.value.partial_cmp(&other.value);
        }
        None
    }
}

/// Convert from string to Decimal128
/// If the string length is less than 28, the result of rust_decimal will underflow,
/// In this case, use BigDecimal to get accurate result.
impl FromStr for Decimal128 {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let len = s.as_bytes().len();
        if len <= BYTES_TO_OVERFLOW_RUST_DECIMAL {
            let rd = RustDecimal::from_str_exact(s).context(ParseRustDecimalStrSnafu { raw: s })?;
            Ok(Self::from(rd))
        } else {
            let bd = BigDecimal::from_str(s).context(ParseBigDecimalStrSnafu { raw: s })?;
            Self::try_from(bd)
        }
    }
}

impl Display for Decimal128 {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}",
            format_decimal_str(&self.value.to_string(), self.precision as usize, self.scale)
        )
    }
}

impl Hash for Decimal128 {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        state.write_i128(self.value);
        state.write_u8(self.precision);
        state.write_i8(self.scale);
    }
}

impl From<Decimal128> for serde_json::Value {
    fn from(decimal: Decimal128) -> Self {
        serde_json::Value::String(decimal.to_string())
    }
}

impl From<Decimal128> for i128 {
    fn from(decimal: Decimal128) -> Self {
        decimal.val()
    }
}

impl From<i128> for Decimal128 {
    fn from(value: i128) -> Self {
        Self {
            value,
            precision: DECIMAL128_MAX_PRECISION,
            scale: DECIMAL128_DEFAULT_SCALE,
        }
    }
}

/// Convert from RustDecimal to Decimal128
/// RustDecimal can represent the range is smaller than Decimal128,
/// it is safe to convert RustDecimal to Decimal128
impl From<RustDecimal> for Decimal128 {
    fn from(rd: RustDecimal) -> Self {
        let s = rd.to_string();
        let precision = (s.len() - s.matches(&['.', '-'][..]).count()) as u8;
        Self {
            value: rd.mantissa(),
            precision,
            scale: rd.scale() as i8,
        }
    }
}

/// Try from BigDecimal to Decimal128
/// The range that BigDecimal can represent is larger than Decimal128,
/// so it is not safe to convert BigDecimal to Decimal128,
/// If the BigDecimal is out of range, return error.
impl TryFrom<BigDecimal> for Decimal128 {
    type Error = Error;

    fn try_from(value: BigDecimal) -> Result<Self, Self::Error> {
        let precision = value.digits();
        let (big_int, scale) = value.as_bigint_and_exponent();
        // convert big_int to i128, if convert failed, return error
        big_int
            .to_i128()
            .map(|val| Self::try_new(val, precision as u8, scale as i8))
            .unwrap_or_else(|| BigDecimalOutOfRangeSnafu { value }.fail())
    }
}

/// Port from arrow-rs,
/// see https://github.com/Apache/arrow-rs/blob/master/arrow-array/src/types.rs#L1323-L1344
fn format_decimal_str(value_str: &str, precision: usize, scale: i8) -> String {
    let (sign, rest) = match value_str.strip_prefix('-') {
        Some(stripped) => ("-", stripped),
        None => ("", value_str),
    };

    let bound = precision.min(rest.len()) + sign.len();
    let value_str = &value_str[0..bound];

    if scale == 0 {
        value_str.to_string()
    } else if scale < 0 {
        let padding = value_str.len() + scale.unsigned_abs() as usize;
        format!("{value_str:0<padding$}")
    } else if rest.len() > scale as usize {
        // Decimal separator is in the middle of the string
        let (whole, decimal) = value_str.split_at(value_str.len() - scale as usize);
        format!("{whole}.{decimal}")
    } else {
        // String has to be padded
        format!("{}0.{:0>width$}", sign, rest, width = scale as usize)
    }
}

/// check whether precision and scale is valid
fn valid_precision_and_scale(precision: u8, scale: i8) -> error::Result<()> {
    if precision == 0 {
        return InvalidPrecisionOrScaleSnafu {
            reason: format!(
                "precision cannot be 0, has to be between [1, {}]",
                DECIMAL128_MAX_PRECISION
            ),
        }
        .fail();
    }
    if precision > DECIMAL128_MAX_PRECISION {
        return InvalidPrecisionOrScaleSnafu {
            reason: format!(
                "precision {} is greater than max {}",
                precision, DECIMAL128_MAX_PRECISION
            ),
        }
        .fail();
    }
    if scale > DECIMAL128_MAX_SCALE {
        return InvalidPrecisionOrScaleSnafu {
            reason: format!(
                "scale {} is greater than max {}",
                scale, DECIMAL128_MAX_SCALE
            ),
        }
        .fail();
    }
    if scale > 0 && scale > precision as i8 {
        return InvalidPrecisionOrScaleSnafu {
            reason: format!("scale {} is greater than precision {}", scale, precision),
        }
        .fail();
    }

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_common_decimal128() {
        let decimal = Decimal128::new(123456789, 9, 3);
        assert_eq!(decimal.to_string(), "123456.789");

        let decimal = Decimal128::try_new(123456789, 9, 0);
        assert_eq!(decimal.unwrap().to_string(), "123456789");

        let decimal = Decimal128::try_new(123456789, 9, 2);
        assert_eq!(decimal.unwrap().to_string(), "1234567.89");

        let decimal = Decimal128::try_new(123, 3, -2);
        assert_eq!(decimal.unwrap().to_string(), "12300");

        // invalid precision or scale

        // precision is 0
        let decimal = Decimal128::try_new(123, 0, 0);
        assert!(decimal.is_err());

        // precision is greater than 38
        let decimal = Decimal128::try_new(123, 39, 0);
        assert!(decimal.is_err());

        // scale is greater than 38
        let decimal = Decimal128::try_new(123, 38, 39);
        assert!(decimal.is_err());

        // scale is greater than precision
        let decimal = Decimal128::try_new(123, 3, 4);
        assert!(decimal.is_err());
    }

    #[test]
    fn test_decimal128_from_str() {
        // 0 < precision <= 28
        let decimal = Decimal128::from_str("1234567890.123456789").unwrap();
        assert_eq!(decimal.to_string(), "1234567890.123456789");
        assert_eq!(decimal.precision(), 19);
        assert_eq!(decimal.scale(), 9);

        let decimal = Decimal128::from_str("1234567890.123456789012345678").unwrap();
        assert_eq!(decimal.to_string(), "1234567890.123456789012345678");
        assert_eq!(decimal.precision(), 28);
        assert_eq!(decimal.scale(), 18);

        // 28 < precision <= 38
        let decimal = Decimal128::from_str("1234567890.1234567890123456789012").unwrap();
        assert_eq!(decimal.to_string(), "1234567890.1234567890123456789012");
        assert_eq!(decimal.precision(), 32);
        assert_eq!(decimal.scale(), 22);

        let decimal = Decimal128::from_str("1234567890.1234567890123456789012345678").unwrap();
        assert_eq!(
            decimal.to_string(),
            "1234567890.1234567890123456789012345678"
        );
        assert_eq!(decimal.precision(), 38);
        assert_eq!(decimal.scale(), 28);

        // precision > 38
        let decimal = Decimal128::from_str("1234567890.12345678901234567890123456789");
        assert!(decimal.is_err());
    }

    #[test]
    #[ignore]
    fn test_parse_decimal128_speed() {
        // RustDecimal::from_str: 1.124855167s
        for _ in 0..1500000 {
            let _ = RustDecimal::from_str("1234567890.123456789012345678999").unwrap();
        }

        // BigDecimal::try_from: 6.799290042s
        for _ in 0..1500000 {
            let _ = BigDecimal::from_str("1234567890.123456789012345678999").unwrap();
        }
    }

    #[test]
    fn test_decimal128_precision_and_scale() {
        // precision and scale from Deicmal(1,1) to Decimal(38,38)
        for precision in 1..=38 {
            for scale in 1..=precision {
                let decimal_str = format!("0.{}", "1".repeat(scale as usize));
                let decimal = Decimal128::from_str(&decimal_str).unwrap();
                assert_eq!(decimal_str, decimal.to_string());
            }
        }
    }

    #[test]
    fn test_decimal128_compare() {
        // the same precision and scale
        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        assert!(decimal1 == decimal2);

        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678998").unwrap();
        assert!(decimal1 > decimal2);

        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678998").unwrap();
        assert!(decimal2 < decimal1);

        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678998").unwrap();
        assert!(decimal1 >= decimal2);

        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678998").unwrap();
        assert!(decimal2 <= decimal1);

        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678998").unwrap();
        assert!(decimal1 != decimal2);

        // different precision and scale cmp is None
        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123").unwrap();
        assert_eq!(decimal1.partial_cmp(&decimal2), None);
    }
}
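A brief usage sketch of the Decimal128 API added above. Every call shown (new, from_str, precision, scale, split_value, from_value_precision_scale) is defined in the file; the crate path follows the new lib.rs re-export, and the concrete values are only illustrative.

use std::str::FromStr;

use common_decimal::Decimal128;

fn main() {
    // value 123456789 with precision 9 and scale 3 prints as "123456.789".
    let d = Decimal128::new(123456789, 9, 3);
    assert_eq!(d.to_string(), "123456.789");

    // Short strings go through rust_decimal; longer ones fall back to BigDecimal.
    let parsed = Decimal128::from_str("1234567890.123456789").unwrap();
    assert_eq!((parsed.precision(), parsed.scale()), (19, 9));

    // The i128 can be split into two i64 halves and rebuilt, e.g. when the value
    // has to travel as two 64-bit integers.
    let (hi, lo) = parsed.split_value();
    let rebuilt =
        Decimal128::from_value_precision_scale(hi, lo, parsed.precision(), parsed.scale());
    assert_eq!(rebuilt, parsed);
}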
src/common/decimal/src/error.rs (new file, 72 lines, all added)
@@ -0,0 +1,72 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use bigdecimal::BigDecimal;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};

#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Decimal out of range, decimal value: {}", value))]
    BigDecimalOutOfRange {
        value: BigDecimal,
        location: Location,
    },

    #[snafu(display("Failed to parse string to rust decimal, raw: {}", raw))]
    ParseRustDecimalStr {
        raw: String,
        #[snafu(source)]
        error: rust_decimal::Error,
    },

    #[snafu(display("Failed to parse string to big decimal, raw: {}", raw))]
    ParseBigDecimalStr {
        raw: String,
        #[snafu(source)]
        error: bigdecimal::ParseBigDecimalError,
    },

    #[snafu(display("Invalid precision or scale, resion: {}", reason))]
    InvalidPrecisionOrScale { reason: String, location: Location },
}

impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::BigDecimalOutOfRange { .. } => StatusCode::Internal,
            Error::ParseRustDecimalStr { .. }
            | Error::InvalidPrecisionOrScale { .. }
            | Error::ParseBigDecimalStr { .. } => StatusCode::InvalidArguments,
        }
    }

    fn location_opt(&self) -> Option<common_error::snafu::Location> {
        match self {
            Error::BigDecimalOutOfRange { location, .. } => Some(*location),
            Error::InvalidPrecisionOrScale { location, .. } => Some(*location),
            Error::ParseRustDecimalStr { .. } | Error::ParseBigDecimalStr { .. } => None,
        }
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-pub mod bench_decode;
-pub mod bench_encode;
-pub mod bench_wal;
-pub mod util;
+pub mod decimal128;
+pub mod error;
+pub use decimal128::Decimal128;
@@ -5,5 +5,5 @@ edition.workspace = true
 license.workspace = true

 [dependencies]
-snafu = { version = "0.7", features = ["backtraces"] }
+snafu.workspace = true
 strum.workspace = true
@@ -39,17 +39,25 @@ pub trait ErrorExt: StackError {
     where
         Self: Sized,
     {
-        let error = self.last();
-        if let Some(external_error) = error.source() {
-            let external_root = external_error.sources().last().unwrap();
-
-            if error.to_string().is_empty() {
-                format!("{external_root}")
-            } else {
-                format!("{error}: {external_root}")
+        match self.status_code() {
+            StatusCode::Unknown | StatusCode::Internal => {
+                // masks internal error from end user
+                format!("Internal error: {}", self.status_code() as u32)
+            }
+            _ => {
+                let error = self.last();
+                if let Some(external_error) = error.source() {
+                    let external_root = external_error.sources().last().unwrap();
+
+                    if error.to_string().is_empty() {
+                        format!("{external_root}")
+                    } else {
+                        format!("{error}: {external_root}")
+                    }
+                } else {
+                    format!("{error}")
+                }
             }
-        } else {
-            format!("{error}")
         }
     }
 }
@@ -11,6 +11,7 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.

 #![feature(error_iter)]

 pub mod ext;
@@ -14,10 +14,10 @@

 use std::fmt;

-use strum::EnumString;
+use strum::{AsRefStr, EnumString};

 /// Common status code for public API.
-#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumString)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumString, AsRefStr)]
 pub enum StatusCode {
     // ====== Begin of common status code ==============
     /// Success.
@@ -7,12 +7,12 @@ license.workspace = true
 [dependencies]
 arc-swap = "1.0"
 chrono-tz = "0.6"
-common-error = { workspace = true }
-common-macro = { workspace = true }
-common-query = { workspace = true }
-common-time = { workspace = true }
+common-error.workspace = true
+common-macro.workspace = true
+common-query.workspace = true
+common-time.workspace = true
 datafusion.workspace = true
-datatypes = { workspace = true }
+datatypes.workspace = true
 libc = "0.2"
 num = "0.4"
 num-traits = "0.2"
@@ -16,7 +16,8 @@ use std::fmt;

 use common_query::error::{self, Result};
 use common_query::prelude::{Signature, Volatility};
-use datatypes::arrow::compute::kernels::{arithmetic, cast};
+use datafusion::arrow::compute::kernels::numeric;
+use datatypes::arrow::compute::kernels::cast;
 use datatypes::arrow::datatypes::DataType;
 use datatypes::prelude::*;
 use datatypes::vectors::{Helper, VectorRef};
@@ -51,11 +52,11 @@ impl Function for RateFunction {
         let val = &columns[0].to_arrow_array();
         let val_0 = val.slice(0, val.len() - 1);
         let val_1 = val.slice(1, val.len() - 1);
-        let dv = arithmetic::subtract_dyn(&val_1, &val_0).context(error::ArrowComputeSnafu)?;
+        let dv = numeric::sub(&val_1, &val_0).context(error::ArrowComputeSnafu)?;
         let ts = &columns[1].to_arrow_array();
         let ts_0 = ts.slice(0, ts.len() - 1);
         let ts_1 = ts.slice(1, ts.len() - 1);
-        let dt = arithmetic::subtract_dyn(&ts_1, &ts_0).context(error::ArrowComputeSnafu)?;
+        let dt = numeric::sub(&ts_1, &ts_0).context(error::ArrowComputeSnafu)?;

         let dv = cast::cast(&dv, &DataType::Float64).context(error::TypeCastSnafu {
             typ: DataType::Float64,
@@ -63,7 +64,7 @@ impl Function for RateFunction {
         let dt = cast::cast(&dt, &DataType::Float64).context(error::TypeCastSnafu {
             typ: DataType::Float64,
         })?;
-        let rate = arithmetic::divide_dyn(&dv, &dt).context(error::ArrowComputeSnafu)?;
+        let rate = numeric::div(&dv, &dt).context(error::ArrowComputeSnafu)?;
         let v = Helper::try_into_vector(&rate).context(error::FromArrowArraySnafu)?;

         Ok(v)
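For context, a small self-contained sketch of the replacement arrow kernels used above (numeric::sub and numeric::div supersede the removed subtract_dyn and divide_dyn). It assumes only datafusion's re-exported arrow; the sample values are made up.

use datafusion::arrow::array::{ArrayRef, Float64Array};
use datafusion::arrow::compute::kernels::numeric;

fn main() {
    let counter = Float64Array::from(vec![10.0, 15.0, 27.0]);
    let shifted = Float64Array::from(vec![15.0, 27.0, 40.0]);

    // Element-wise difference, the same call RateFunction now uses for dv and dt.
    let delta: ArrayRef = numeric::sub(&shifted, &counter).unwrap();

    let elapsed = Float64Array::from(vec![5.0, 5.0, 5.0]);
    // Element-wise division gives the per-second rate.
    let rate: ArrayRef = numeric::div(&delta, &elapsed).unwrap();
    println!("{rate:?}");
}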
@@ -11,6 +11,7 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.

 use std::sync::Arc;
 mod greatest;
 mod to_unixtime;
@@ -18,9 +18,9 @@ use common_query::error::{
     self, ArrowComputeSnafu, InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu,
 };
 use common_query::prelude::{Signature, Volatility};
+use datafusion::arrow::compute::kernels::cmp::gt;
 use datatypes::arrow::array::AsArray;
 use datatypes::arrow::compute::cast;
-use datatypes::arrow::compute::kernels::comparison::gt_dyn;
 use datatypes::arrow::compute::kernels::zip;
 use datatypes::arrow::datatypes::{DataType as ArrowDataType, Date32Type};
 use datatypes::prelude::ConcreteDataType;
@@ -72,7 +72,7 @@ impl Function for GreatestFunction {
                 let column2 = cast(&columns[1].to_arrow_array(), &ArrowDataType::Date32)
                     .context(ArrowComputeSnafu)?;
                 let column2 = column2.as_primitive::<Date32Type>();
-                let boolean_array = gt_dyn(&column1, &column2).context(ArrowComputeSnafu)?;
+                let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
                 let result =
                     zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
                 Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
@@ -82,7 +82,7 @@ impl Function for GreatestFunction {
                 let column1 = column1.as_primitive::<Date32Type>();
                 let column2 = columns[1].to_arrow_array();
                 let column2 = column2.as_primitive::<Date32Type>();
-                let boolean_array = gt_dyn(&column1, &column2).context(ArrowComputeSnafu)?;
+                let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
                 let result =
                     zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
                 Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
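A self-contained sketch of the gt-plus-zip kernel combination that GreatestFunction relies on above. The crate paths mirror the imports in the hunk; the sample Date32 values (days since the Unix epoch) are arbitrary.

use datafusion::arrow::compute::kernels::cmp::gt;
use datatypes::arrow::array::Date32Array;
use datatypes::arrow::compute::kernels::zip;

fn main() {
    let a = Date32Array::from(vec![19000, 19500, 18000]);
    let b = Date32Array::from(vec![19100, 19400, 18000]);

    // Boolean mask: true where a > b.
    let mask = gt(&a, &b).unwrap();
    // Take a where the mask is true, b otherwise: the element-wise "greatest".
    let greatest = zip::zip(&mask, &a, &b).unwrap();
    println!("{greatest:?}");
}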
@@ -18,36 +18,50 @@ use std::sync::Arc;

 use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
 use common_query::prelude::{Signature, Volatility};
-use common_time::timestamp::TimeUnit;
-use common_time::Timestamp;
+use common_time::{Date, DateTime, Timestamp};
 use datatypes::prelude::ConcreteDataType;
-use datatypes::types::TimestampType;
-use datatypes::vectors::{
-    Int64Vector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
-    TimestampNanosecondVector, TimestampSecondVector, Vector, VectorRef,
-};
+use datatypes::vectors::{Int64Vector, VectorRef};
 use snafu::ensure;

 use crate::scalars::function::{Function, FunctionContext};

+/// A function to convert the column into the unix timestamp in seconds.
 #[derive(Clone, Debug, Default)]
 pub struct ToUnixtimeFunction;

 const NAME: &str = "to_unixtime";

 fn convert_to_seconds(arg: &str) -> Option<i64> {
-    match Timestamp::from_str(arg) {
-        Ok(ts) => {
-            let sec_mul = (TimeUnit::Second.factor() / ts.unit().factor()) as i64;
-            Some(ts.value().div_euclid(sec_mul))
-        }
-        Err(_err) => None,
-    }
+    if let Ok(dt) = DateTime::from_str(arg) {
+        return Some(dt.val() / 1000);
+    }
+
+    if let Ok(ts) = Timestamp::from_str(arg) {
+        return Some(ts.split().0);
+    }
+
+    if let Ok(date) = Date::from_str(arg) {
+        return Some(date.to_secs());
+    }
+
+    None
 }

-fn process_vector(vector: &dyn Vector) -> Vec<Option<i64>> {
+fn convert_timestamps_to_seconds(vector: &VectorRef) -> Vec<Option<i64>> {
     (0..vector.len())
-        .map(|i| paste::expr!((vector.get(i)).as_timestamp().map(|ts| ts.value())))
+        .map(|i| vector.get(i).as_timestamp().map(|ts| ts.split().0))
+        .collect::<Vec<Option<i64>>>()
+}
+
+fn convert_dates_to_seconds(vector: &VectorRef) -> Vec<Option<i64>> {
+    (0..vector.len())
+        .map(|i| vector.get(i).as_date().map(|dt| dt.to_secs()))
+        .collect::<Vec<Option<i64>>>()
+}
+
+fn convert_datetimes_to_seconds(vector: &VectorRef) -> Vec<Option<i64>> {
+    (0..vector.len())
+        .map(|i| vector.get(i).as_datetime().map(|dt| dt.val() / 1000))
         .collect::<Vec<Option<i64>>>()
 }

@@ -67,6 +81,8 @@ impl Function for ToUnixtimeFunction {
                 ConcreteDataType::string_datatype(),
                 ConcreteDataType::int32_datatype(),
                 ConcreteDataType::int64_datatype(),
+                ConcreteDataType::date_datatype(),
+                ConcreteDataType::datetime_datatype(),
                 ConcreteDataType::timestamp_second_datatype(),
                 ConcreteDataType::timestamp_millisecond_datatype(),
                 ConcreteDataType::timestamp_microsecond_datatype(),
@@ -87,51 +103,29 @@ impl Function for ToUnixtimeFunction {
|
|||||||
}
|
}
|
||||||
);
|
);
|
||||||
|
|
||||||
|
let vector = &columns[0];
|
||||||
|
|
||||||
match columns[0].data_type() {
|
match columns[0].data_type() {
|
||||||
ConcreteDataType::String(_) => {
|
ConcreteDataType::String(_) => Ok(Arc::new(Int64Vector::from(
|
||||||
let array = columns[0].to_arrow_array();
|
(0..vector.len())
|
||||||
let vector = StringVector::try_from_arrow_array(&array).unwrap();
|
.map(|i| convert_to_seconds(&vector.get(i).to_string()))
|
||||||
Ok(Arc::new(Int64Vector::from(
|
.collect::<Vec<_>>(),
|
||||||
(0..vector.len())
|
))),
|
||||||
.map(|i| convert_to_seconds(&vector.get(i).to_string()))
|
|
||||||
.collect::<Vec<_>>(),
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
ConcreteDataType::Int64(_) | ConcreteDataType::Int32(_) => {
|
ConcreteDataType::Int64(_) | ConcreteDataType::Int32(_) => {
|
||||||
let array = columns[0].to_arrow_array();
|
// Safety: cast always successfully at here
|
||||||
Ok(Arc::new(Int64Vector::try_from_arrow_array(&array).unwrap()))
|
Ok(vector.cast(&ConcreteDataType::int64_datatype()).unwrap())
|
||||||
}
|
}
|
||||||
ConcreteDataType::Timestamp(ts) => {
|
ConcreteDataType::Date(_) => {
|
||||||
let array = columns[0].to_arrow_array();
|
let seconds = convert_dates_to_seconds(vector);
|
||||||
let value = match ts {
|
Ok(Arc::new(Int64Vector::from(seconds)))
|
||||||
TimestampType::Second(_) => {
|
}
|
||||||
let vector = paste::expr!(TimestampSecondVector::try_from_arrow_array(
|
ConcreteDataType::DateTime(_) => {
|
||||||
array
|
let seconds = convert_datetimes_to_seconds(vector);
|
||||||
)
|
Ok(Arc::new(Int64Vector::from(seconds)))
|
||||||
.unwrap());
|
}
|
||||||
process_vector(&vector)
|
ConcreteDataType::Timestamp(_) => {
|
||||||
}
|
let seconds = convert_timestamps_to_seconds(vector);
|
||||||
TimestampType::Millisecond(_) => {
|
Ok(Arc::new(Int64Vector::from(seconds)))
|
||||||
let vector = paste::expr!(
|
|
||||||
TimestampMillisecondVector::try_from_arrow_array(array).unwrap()
|
|
||||||
);
|
|
||||||
process_vector(&vector)
|
|
||||||
}
|
|
||||||
TimestampType::Microsecond(_) => {
|
|
||||||
let vector = paste::expr!(
|
|
||||||
TimestampMicrosecondVector::try_from_arrow_array(array).unwrap()
|
|
||||||
);
|
|
||||||
process_vector(&vector)
|
|
||||||
}
|
|
||||||
TimestampType::Nanosecond(_) => {
|
|
||||||
let vector = paste::expr!(TimestampNanosecondVector::try_from_arrow_array(
|
|
||||||
array
|
|
||||||
)
|
|
||||||
.unwrap());
|
|
||||||
process_vector(&vector)
|
|
||||||
}
|
|
||||||
};
|
|
||||||
Ok(Arc::new(Int64Vector::from(value)))
|
|
||||||
}
|
}
|
||||||
_ => UnsupportedInputDataTypeSnafu {
|
_ => UnsupportedInputDataTypeSnafu {
|
||||||
function: NAME,
|
function: NAME,
|
||||||
@@ -151,11 +145,11 @@ impl fmt::Display for ToUnixtimeFunction {
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use common_query::prelude::TypeSignature;
|
use common_query::prelude::TypeSignature;
|
||||||
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder};
|
use datatypes::prelude::ConcreteDataType;
|
||||||
use datatypes::scalars::ScalarVector;
|
|
||||||
use datatypes::timestamp::TimestampSecond;
|
|
||||||
use datatypes::value::Value;
|
use datatypes::value::Value;
|
||||||
use datatypes::vectors::{StringVector, TimestampSecondVector};
|
use datatypes::vectors::{
|
||||||
|
DateTimeVector, DateVector, StringVector, TimestampMillisecondVector, TimestampSecondVector,
|
||||||
|
};
|
||||||
|
|
||||||
use super::{ToUnixtimeFunction, *};
|
use super::{ToUnixtimeFunction, *};
|
||||||
use crate::scalars::Function;
|
use crate::scalars::Function;
|
||||||
@@ -170,18 +164,20 @@ mod tests {
|
|||||||
);
|
);
|
||||||
|
|
||||||
assert!(matches!(f.signature(),
|
assert!(matches!(f.signature(),
|
||||||
Signature {
|
Signature {
|
||||||
type_signature: TypeSignature::Uniform(1, valid_types),
|
type_signature: TypeSignature::Uniform(1, valid_types),
|
||||||
volatility: Volatility::Immutable
|
volatility: Volatility::Immutable
|
||||||
} if valid_types == vec![
|
} if valid_types == vec![
|
||||||
ConcreteDataType::string_datatype(),
|
ConcreteDataType::string_datatype(),
|
||||||
ConcreteDataType::int32_datatype(),
|
ConcreteDataType::int32_datatype(),
|
||||||
ConcreteDataType::int64_datatype(),
|
ConcreteDataType::int64_datatype(),
|
||||||
ConcreteDataType::timestamp_second_datatype(),
|
ConcreteDataType::date_datatype(),
|
||||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
ConcreteDataType::datetime_datatype(),
|
||||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
ConcreteDataType::timestamp_second_datatype(),
|
||||||
ConcreteDataType::timestamp_nanosecond_datatype(),
|
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||||
]
|
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||||
|
ConcreteDataType::timestamp_nanosecond_datatype(),
|
||||||
|
]
|
||||||
));
|
));
|
||||||
|
|
||||||
let times = vec![
|
let times = vec![
|
||||||
@@ -212,26 +208,6 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_int_to_unixtime() {
|
fn test_int_to_unixtime() {
|
||||||
let f = ToUnixtimeFunction;
|
let f = ToUnixtimeFunction;
|
||||||
assert_eq!("to_unixtime", f.name());
|
|
||||||
assert_eq!(
|
|
||||||
ConcreteDataType::int64_datatype(),
|
|
||||||
f.return_type(&[]).unwrap()
|
|
||||||
);
|
|
||||||
|
|
||||||
assert!(matches!(f.signature(),
|
|
||||||
Signature {
|
|
||||||
type_signature: TypeSignature::Uniform(1, valid_types),
|
|
||||||
volatility: Volatility::Immutable
|
|
||||||
} if valid_types == vec![
|
|
||||||
ConcreteDataType::string_datatype(),
|
|
||||||
ConcreteDataType::int32_datatype(),
|
|
||||||
ConcreteDataType::int64_datatype(),
|
|
||||||
ConcreteDataType::timestamp_second_datatype(),
|
|
||||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
|
||||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
|
||||||
ConcreteDataType::timestamp_nanosecond_datatype(),
|
|
||||||
]
|
|
||||||
));
|
|
||||||
|
|
||||||
let times = vec![Some(3_i64), None, Some(5_i64), None];
|
let times = vec![Some(3_i64), None, Some(5_i64), None];
|
||||||
let results = [Some(3), None, Some(5), None];
|
let results = [Some(3), None, Some(5), None];
|
||||||
@@ -254,38 +230,13 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_timestamp_to_unixtime() {
|
fn test_date_to_unixtime() {
|
||||||
let f = ToUnixtimeFunction;
|
let f = ToUnixtimeFunction;
|
||||||
assert_eq!("to_unixtime", f.name());
|
|
||||||
assert_eq!(
|
|
||||||
ConcreteDataType::int64_datatype(),
|
|
||||||
f.return_type(&[]).unwrap()
|
|
||||||
);
|
|
||||||
|
|
||||||
assert!(matches!(f.signature(),
|
let times = vec![Some(123), None, Some(42), None];
|
||||||
Signature {
|
let results = [Some(10627200), None, Some(3628800), None];
|
||||||
type_signature: TypeSignature::Uniform(1, valid_types),
|
let date_vector = DateVector::from(times.clone());
|
||||||
volatility: Volatility::Immutable
|
let args: Vec<VectorRef> = vec![Arc::new(date_vector)];
|
||||||
} if valid_types == vec![
|
|
||||||
ConcreteDataType::string_datatype(),
|
|
||||||
ConcreteDataType::int32_datatype(),
|
|
||||||
ConcreteDataType::int64_datatype(),
|
|
||||||
ConcreteDataType::timestamp_second_datatype(),
|
|
||||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
|
||||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
|
||||||
ConcreteDataType::timestamp_nanosecond_datatype(),
|
|
||||||
]
|
|
||||||
));
|
|
||||||
|
|
||||||
let times: Vec<Option<TimestampSecond>> = vec![
|
|
||||||
Some(TimestampSecond::new(123)),
|
|
||||||
None,
|
|
||||||
Some(TimestampSecond::new(42)),
|
|
||||||
None,
|
|
||||||
];
|
|
||||||
let results = [Some(123), None, Some(42), None];
|
|
||||||
let ts_vector: TimestampSecondVector = build_vector_from_slice(×);
|
|
||||||
let args: Vec<VectorRef> = vec![Arc::new(ts_vector)];
|
|
||||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||||
assert_eq!(4, vector.len());
|
assert_eq!(4, vector.len());
|
||||||
for (i, _t) in times.iter().enumerate() {
|
for (i, _t) in times.iter().enumerate() {
|
||||||
@@ -303,11 +254,73 @@ mod tests {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn build_vector_from_slice<T: ScalarVector>(items: &[Option<T::RefItem<'_>>]) -> T {
|
#[test]
|
||||||
let mut builder = T::Builder::with_capacity(items.len());
|
fn test_datetime_to_unixtime() {
|
||||||
for item in items {
|
let f = ToUnixtimeFunction;
|
||||||
builder.push(*item);
|
|
||||||
|
let times = vec![Some(123000), None, Some(42000), None];
|
||||||
|
let results = [Some(123), None, Some(42), None];
|
||||||
|
let date_vector = DateTimeVector::from(times.clone());
|
||||||
|
let args: Vec<VectorRef> = vec![Arc::new(date_vector)];
|
||||||
|
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||||
|
assert_eq!(4, vector.len());
|
||||||
|
for (i, _t) in times.iter().enumerate() {
|
||||||
|
let v = vector.get(i);
|
||||||
|
if i == 1 || i == 3 {
|
||||||
|
assert_eq!(Value::Null, v);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
match v {
|
||||||
|
Value::Int64(ts) => {
|
||||||
|
assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
|
||||||
|
}
|
||||||
|
_ => unreachable!(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_timestamp_to_unixtime() {
|
||||||
|
let f = ToUnixtimeFunction;
|
||||||
|
|
||||||
|
let times = vec![Some(123), None, Some(42), None];
|
||||||
|
let results = [Some(123), None, Some(42), None];
|
||||||
|
let ts_vector = TimestampSecondVector::from(times.clone());
|
||||||
|
let args: Vec<VectorRef> = vec![Arc::new(ts_vector)];
|
||||||
|
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||||
|
assert_eq!(4, vector.len());
|
||||||
|
for (i, _t) in times.iter().enumerate() {
|
||||||
|
let v = vector.get(i);
|
||||||
|
if i == 1 || i == 3 {
|
||||||
|
assert_eq!(Value::Null, v);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
match v {
|
||||||
|
Value::Int64(ts) => {
|
||||||
|
assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
|
||||||
|
}
|
||||||
|
_ => unreachable!(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let times = vec![Some(123000), None, Some(42000), None];
|
||||||
|
let results = [Some(123), None, Some(42), None];
|
||||||
|
let ts_vector = TimestampMillisecondVector::from(times.clone());
|
||||||
|
let args: Vec<VectorRef> = vec![Arc::new(ts_vector)];
|
||||||
|
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||||
|
assert_eq!(4, vector.len());
|
||||||
|
for (i, _t) in times.iter().enumerate() {
|
||||||
|
let v = vector.get(i);
|
||||||
|
if i == 1 || i == 3 {
|
||||||
|
assert_eq!(Value::Null, v);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
match v {
|
||||||
|
Value::Int64(ts) => {
|
||||||
|
assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
|
||||||
|
}
|
||||||
|
_ => unreachable!(),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
builder.finish()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
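A note on the arithmetic in the convert_to_seconds change above: the old implementation scaled the raw timestamp value with div_euclid, while the new DateTime branch divides the millisecond value by 1000 with plain integer division. The two only differ for negative (pre-epoch) values, since Rust's `/` truncates toward zero while div_euclid floors. A minimal standalone Rust sketch of that difference (not part of the patch, just the arithmetic):

fn main() {
    // Rust's `/` on integers truncates toward zero, while div_euclid floors.
    // For a pre-epoch datetime of -1500 ms the results differ by one second.
    assert_eq!(-1500_i64 / 1000, -1); // truncation toward zero
    assert_eq!((-1500_i64).div_euclid(1000), -2); // floored division
    println!("truncation vs floor demonstrated");
}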
@@ -6,19 +6,19 @@ license.workspace = true
 
 [dependencies]
 async-trait.workspace = true
-common-error = { workspace = true }
-common-runtime = { workspace = true }
-common-telemetry = { workspace = true }
-reqwest = { workspace = true }
+common-error.workspace = true
+common-runtime.workspace = true
+common-telemetry.workspace = true
+reqwest.workspace = true
 serde.workspace = true
 serde_json.workspace = true
 tokio.workspace = true
 uuid.workspace = true
 
 [dev-dependencies]
-common-test-util = { workspace = true }
+common-test-util.workspace = true
 hyper = { version = "0.14", features = ["full"] }
 tempfile.workspace = true
 
 [build-dependencies]
-common-version = { workspace = true }
+common-version.workspace = true
@@ -57,7 +57,10 @@ impl GreptimeDBTelemetryTask {
         task_fn: BoxedTaskFunction<Error>,
         should_report: Arc<AtomicBool>,
     ) -> Self {
-        GreptimeDBTelemetryTask::Enable((RepeatedTask::new(interval, task_fn), should_report))
+        GreptimeDBTelemetryTask::Enable((
+            RepeatedTask::new(interval, task_fn).with_initial_delay(Some(Duration::ZERO)),
+            should_report,
+        ))
     }
 
     pub fn disable() -> Self {
@@ -207,6 +210,7 @@ pub struct GreptimeDBTelemetry {
     working_home: Option<String>,
     telemetry_url: &'static str,
     should_report: Arc<AtomicBool>,
+    report_times: usize,
 }
 
 #[async_trait::async_trait]
@@ -239,6 +243,7 @@ impl GreptimeDBTelemetry {
             client: client.ok(),
             telemetry_url: TELEMETRY_URL,
             should_report,
+            report_times: 0,
         }
     }
 
@@ -256,8 +261,11 @@ impl GreptimeDBTelemetry {
         };
 
         if let Some(client) = self.client.as_ref() {
-            info!("reporting greptimedb version: {:?}", data);
+            if self.report_times == 0 {
+                info!("reporting greptimedb version: {:?}", data);
+            }
             let result = client.post(self.telemetry_url).json(&data).send().await;
+            self.report_times += 1;
             debug!("report version result: {:?}", result);
             result.ok()
         } else {
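The telemetry hunks above add a report_times counter so the payload is logged at info level only on the first report; later reports still go out, just without the info-level line. A minimal standalone sketch of that throttled-logging pattern (names are illustrative, not the crate's API):

struct Reporter {
    report_times: usize,
}

impl Reporter {
    fn report(&mut self, payload: &str) {
        // Print the payload only for the very first report to keep logs quiet;
        // subsequent reports are still sent, just not echoed at info level.
        if self.report_times == 0 {
            println!("reporting: {payload}");
        }
        self.report_times += 1;
    }
}

fn main() {
    let mut reporter = Reporter { report_times: 0 };
    reporter.report("greptimedb version payload"); // logged
    reporter.report("greptimedb version payload"); // silent
}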
@@ -5,18 +5,18 @@ edition.workspace = true
 license.workspace = true
 
 [dependencies]
-api = { workspace = true }
+api.workspace = true
 async-trait.workspace = true
-common-base = { workspace = true }
-common-catalog = { workspace = true }
-common-error = { workspace = true }
-common-macro = { workspace = true }
-common-query = { workspace = true }
-common-telemetry = { workspace = true }
-common-time = { workspace = true }
-datatypes = { workspace = true }
-snafu = { version = "0.7", features = ["backtraces"] }
-table = { workspace = true }
+common-base.workspace = true
+common-catalog.workspace = true
+common-error.workspace = true
+common-macro.workspace = true
+common-query.workspace = true
+common-telemetry.workspace = true
+common-time.workspace = true
+datatypes.workspace = true
+snafu.workspace = true
+table.workspace = true
 
 [dev-dependencies]
 paste = "1.0"
@@ -158,6 +158,7 @@ mod tests {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             }),
             location: None,
         }],
@@ -199,6 +200,7 @@ mod tests {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             }),
             location: Some(Location {
                 location_type: LocationType::First.into(),
@@ -213,6 +215,7 @@ mod tests {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             }),
            location: Some(Location {
                location_type: LocationType::After.into(),
@@ -36,14 +36,16 @@ pub fn to_table_delete_request(
         values,
         null_mask,
         datatype,
+        datatype_extension,
         ..
     } in request.key_columns
     {
         let Some(values) = values else { continue };
 
-        let datatype: ConcreteDataType = ColumnDataTypeWrapper::try_new(datatype)
-            .context(ColumnDataTypeSnafu)?
-            .into();
+        let datatype: ConcreteDataType =
+            ColumnDataTypeWrapper::try_new(datatype, datatype_extension)
+                .context(ColumnDataTypeSnafu)?
+                .into();
         let vector = add_values_to_builder(datatype, values, row_count, null_mask)?;
 
         ensure!(
@@ -119,7 +119,7 @@ mod tests {
         nullable: bool,
     ) -> error::Result<ColumnSchema> {
         let datatype_wrapper =
-            ColumnDataTypeWrapper::try_new(datatype).context(ColumnDataTypeSnafu)?;
+            ColumnDataTypeWrapper::try_new(datatype, None).context(ColumnDataTypeSnafu)?;
 
         Ok(ColumnSchema::new(
             column_name,
@@ -170,7 +170,8 @@ mod tests {
                     .iter()
                     .find(|c| c.name == "host")
                     .unwrap()
-                    .data_type
+                    .data_type,
+                None
             )
             .unwrap()
         )
@@ -184,7 +185,8 @@ mod tests {
                     .iter()
                     .find(|c| c.name == "cpu")
                     .unwrap()
-                    .data_type
+                    .data_type,
+                None
             )
             .unwrap()
         )
@@ -198,7 +200,8 @@ mod tests {
                     .iter()
                     .find(|c| c.name == "memory")
                     .unwrap()
-                    .data_type
+                    .data_type,
+                None
             )
             .unwrap()
         )
@@ -212,7 +215,8 @@ mod tests {
                     .iter()
                     .find(|c| c.name == "time")
                     .unwrap()
-                    .data_type
+                    .data_type,
+                None
            )
            .unwrap()
        )
@@ -226,7 +230,8 @@ mod tests {
                    .iter()
                    .find(|c| c.name == "interval")
                    .unwrap()
-                    .data_type
+                    .data_type,
+                None
            )
            .unwrap()
        )
@@ -240,7 +245,8 @@ mod tests {
                    .iter()
                    .find(|c| c.name == "duration")
                    .unwrap()
-                    .data_type
+                    .data_type,
+                None
            )
            .unwrap()
        )
@@ -254,7 +260,8 @@ mod tests {
                    .iter()
                    .find(|c| c.name == "ts")
                    .unwrap()
-                    .data_type
+                    .data_type,
+                None
            )
            .unwrap()
        )
@@ -284,8 +291,11 @@ mod tests {
         assert_eq!(
             ConcreteDataType::string_datatype(),
             ConcreteDataType::from(
-                ColumnDataTypeWrapper::try_new(host_column.column_def.as_ref().unwrap().data_type)
-                    .unwrap()
+                ColumnDataTypeWrapper::try_new(
+                    host_column.column_def.as_ref().unwrap().data_type,
+                    None
+                )
+                .unwrap()
             )
         );
 
@@ -294,7 +304,8 @@ mod tests {
             ConcreteDataType::float64_datatype(),
             ConcreteDataType::from(
                 ColumnDataTypeWrapper::try_new(
-                    memory_column.column_def.as_ref().unwrap().data_type
+                    memory_column.column_def.as_ref().unwrap().data_type,
+                    None
                 )
                 .unwrap()
             )
@@ -304,8 +315,11 @@ mod tests {
         assert_eq!(
             ConcreteDataType::time_datatype(TimeUnit::Millisecond),
             ConcreteDataType::from(
-                ColumnDataTypeWrapper::try_new(time_column.column_def.as_ref().unwrap().data_type)
-                    .unwrap()
+                ColumnDataTypeWrapper::try_new(
+                    time_column.column_def.as_ref().unwrap().data_type,
+                    None
+                )
+                .unwrap()
             )
         );
 
@@ -314,7 +328,8 @@ mod tests {
             ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
             ConcreteDataType::from(
                 ColumnDataTypeWrapper::try_new(
-                    interval_column.column_def.as_ref().unwrap().data_type
+                    interval_column.column_def.as_ref().unwrap().data_type,
+                    None
                 )
                 .unwrap()
             )
@@ -326,7 +341,8 @@ mod tests {
             ConcreteDataType::duration_millisecond_datatype(),
             ConcreteDataType::from(
                 ColumnDataTypeWrapper::try_new(
-                    duration_column.column_def.as_ref().unwrap().data_type
+                    duration_column.column_def.as_ref().unwrap().data_type,
+                    None
                 )
                 .unwrap()
             )
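Every ColumnDataTypeWrapper::try_new call in the hunks above gains a second argument; passing None appears to preserve the previous behaviour, while the delete-request hunk earlier threads the column's datatype_extension through instead. A minimal sketch of the same kind of API evolution, using hypothetical names rather than the actual greptimedb types:

// Hypothetical stand-ins; not the actual greptimedb API.
#[derive(Debug)]
struct TypeExtension(String);

#[derive(Debug)]
struct DataTypeWrapper {
    datatype: i32,
    extension: Option<TypeExtension>,
}

impl DataTypeWrapper {
    // The constructor now takes an optional extension alongside the datatype id;
    // callers with nothing extra to convey simply pass None.
    fn try_new(datatype: i32, extension: Option<TypeExtension>) -> Result<Self, String> {
        if datatype < 0 {
            return Err(format!("unknown datatype id: {datatype}"));
        }
        Ok(Self { datatype, extension })
    }
}

fn main() {
    let plain = DataTypeWrapper::try_new(12, None).unwrap();
    let extended =
        DataTypeWrapper::try_new(30, Some(TypeExtension("decimal(38, 10)".into()))).unwrap();
    println!("{plain:?} / {extended:?}");
}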
@@ -360,6 +376,7 @@ mod tests {
             values: Some(host_vals),
             null_mask: vec![0],
             datatype: ColumnDataType::String as i32,
+            ..Default::default()
         };
 
         let cpu_vals = Values {
@@ -372,6 +389,7 @@ mod tests {
             values: Some(cpu_vals),
             null_mask: vec![2],
             datatype: ColumnDataType::Float64 as i32,
+            ..Default::default()
         };
 
         let mem_vals = Values {
@@ -384,6 +402,7 @@ mod tests {
             values: Some(mem_vals),
             null_mask: vec![1],
             datatype: ColumnDataType::Float64 as i32,
+            ..Default::default()
         };
 
         let time_vals = Values {
@@ -396,6 +415,7 @@ mod tests {
             values: Some(time_vals),
             null_mask: vec![0],
             datatype: ColumnDataType::TimeMillisecond as i32,
+            ..Default::default()
         };
 
         let interval1 = IntervalMonthDayNano {
@@ -418,6 +438,7 @@ mod tests {
             values: Some(interval_vals),
             null_mask: vec![0],
             datatype: ColumnDataType::IntervalMonthDayNano as i32,
+            ..Default::default()
         };
 
         let duration_vals = Values {
@@ -430,6 +451,7 @@ mod tests {
             values: Some(duration_vals),
             null_mask: vec![0],
             datatype: ColumnDataType::DurationMillisecond as i32,
+            ..Default::default()
         };
 
         let ts_vals = Values {
@@ -442,6 +464,7 @@ mod tests {
             values: Some(ts_vals),
             null_mask: vec![0],
             datatype: ColumnDataType::TimestampMillisecond as i32,
+            ..Default::default()
         };
 
         (
@@ -121,6 +121,7 @@ pub fn build_create_table_expr(
             default_constraint: vec![],
             semantic_type,
             comment: String::new(),
+            ..Default::default()
         };
         column_defs.push(column_def);
     }
@@ -161,6 +162,7 @@ pub fn extract_new_columns(
             default_constraint: vec![],
             semantic_type: expr.semantic_type,
             comment: String::new(),
+            ..Default::default()
         });
         AddColumn {
             column_def,
@@ -5,25 +5,25 @@ edition.workspace = true
 license.workspace = true
 
 [dependencies]
-api = { workspace = true }
+api.workspace = true
 arrow-flight.workspace = true
 async-trait = "0.1"
 backtrace = "0.3"
-common-base = { workspace = true }
-common-error = { workspace = true }
-common-macro = { workspace = true }
-common-recordbatch = { workspace = true }
-common-runtime = { workspace = true }
-common-telemetry = { workspace = true }
-common-time = { workspace = true }
+common-base.workspace = true
+common-error.workspace = true
+common-macro.workspace = true
+common-recordbatch.workspace = true
+common-runtime.workspace = true
+common-telemetry.workspace = true
+common-time.workspace = true
 dashmap = "5.4"
 datafusion.workspace = true
-datatypes = { workspace = true }
+datatypes.workspace = true
 flatbuffers = "23.1"
 futures = "0.3"
 lazy_static.workspace = true
 prost.workspace = true
-snafu = { version = "0.7", features = ["backtraces"] }
+snafu.workspace = true
 tokio.workspace = true
 tonic.workspace = true
 tower = "0.4"
@@ -163,7 +163,14 @@ impl ChannelManager {
     }
 
     fn build_endpoint(&self, addr: &str) -> Result<Endpoint> {
-        let mut endpoint = Endpoint::new(format!("http://{addr}")).context(CreateChannelSnafu)?;
+        let http_prefix = if self.client_tls_config.is_some() {
+            "https"
+        } else {
+            "http"
+        };
+
+        let mut endpoint =
+            Endpoint::new(format!("{http_prefix}://{addr}")).context(CreateChannelSnafu)?;
 
         if let Some(dur) = self.config.timeout {
             endpoint = endpoint.timeout(dur);
@@ -241,7 +241,7 @@ mod test {
         .unwrap();
 
         let flight_data = batches_to_flight_data(
-            arrow_schema,
+            &arrow_schema,
             vec![
                 batch1.clone().into_df_record_batch(),
                 batch2.clone().into_df_record_batch(),
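The build_endpoint change above derives the URI scheme from whether a client TLS configuration is present. A minimal standalone sketch of that selection (the tls_config parameter here is a stand-in, not the crate's actual field):

fn endpoint_uri(addr: &str, tls_config: Option<&str>) -> String {
    // Use https when a TLS configuration is present, plain http otherwise.
    let scheme = if tls_config.is_some() { "https" } else { "http" };
    format!("{scheme}://{addr}")
}

fn main() {
    assert_eq!(endpoint_uri("127.0.0.1:4001", None), "http://127.0.0.1:4001");
    assert_eq!(endpoint_uri("127.0.0.1:4001", Some("tls")), "https://127.0.0.1:4001");
    println!("scheme selection ok");
}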
Some files were not shown because too many files have changed in this diff.