Compare commits

...

79 Commits

Author SHA1 Message Date
Yingwen
506dc20765 fix: last non null iter not init (#4687) 2024-09-06 04:13:23 +00:00
Lei, HUANG
114772ba87 chore: bump version v0.9.3 (#4684) 2024-09-06 02:31:41 +00:00
liyang
89a3da8a3a chore(dockerfile): remove mysql and postgresql clients in greptimedb image (#4685) 2024-09-05 16:00:53 +00:00
jeremyhi
8814695b58 feat: invalidate cache via invalidator on region migration (#4682)
feat: invalidate table via invalidator on region migration
2024-09-05 06:15:38 +00:00
Lanqing Yang
86cef648cd feat: add more spans to mito engine (#4643)
feat: add more span on mito engine
2024-09-05 06:13:22 +00:00
Ning Sun
e476e36647 feat: add geohash and h3 as built-in functions (#4656)
* feat: add built-in functions h3 and geohash

* tests: add sqlness tests for geo functions

* doc: correct h3 comment

* fix: lint error

* fix: toml format

* refactor: address review comments

* test: add more sqlness cases

* Apply suggestions from code review

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-05 04:42:29 +00:00
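
For readers unfamiliar with the new geo built-ins: #4656 pulls in the `geohash` and `h3o` crates (both appear in the Cargo.lock diff further down). Below is a rough sketch of the underlying crate calls such functions can wrap — the exact crate signatures and the SQL-level function shapes are assumptions here, not taken from the PR itself:

```rust
// Sketch only: the geohash/h3o crate calls that built-in functions like
// geohash() and h3() could wrap. Crate APIs are assumed, not verified
// against the PR.
use geo_types::Coord;
use geohash::encode;
use h3o::{LatLng, Resolution};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Geohash string for a longitude/latitude pair at precision 9.
    let hash = encode(Coord { x: -122.4194, y: 37.7749 }, 9)?;
    println!("geohash: {hash}");

    // H3 cell index for the same point at resolution 9.
    let cell = LatLng::new(37.7749, -122.4194)?.to_cell(Resolution::Nine);
    println!("h3 cell: {cell}");
    Ok(())
}
```
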
shuiyisong
4781b327f3 fix: ref to auth err (#4681)
* fix: ref to auth err

* fix: typo
2024-09-05 04:05:39 +00:00
LFC
3e4a69017d build: add mysql and postgresql clients to greptimedb image (#4677) 2024-09-04 11:38:47 +00:00
LFC
d43e31c7ed feat: schedule compaction when adding sst files by editing region (#4648)
* feat: schedule compaction when adding sst files by editing region

* add minimum time interval for two successive compactions

* resolve PR comments
2024-09-04 10:10:07 +00:00
discord9
19e2a9d44b feat: change log level dynamically (#4653)
* feat: add dyn_log handle

* feat: use reload handle

* chore: per review
2024-09-04 07:54:50 +00:00
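
The "use reload handle" message refers to the standard `tracing-subscriber` reload mechanism for swapping the active log filter at runtime. A minimal sketch of that pattern, assuming an `EnvFilter`-based setup (the actual `dyn_log` HTTP handler from #4653 is not reproduced here):

```rust
// Requires tracing-subscriber with the "env-filter" feature.
use tracing_subscriber::{prelude::*, reload, EnvFilter};

fn main() {
    // Wrap the filter in a reload layer and keep the handle around.
    let (filter, handle) = reload::Layer::new(EnvFilter::new("info"));
    tracing_subscriber::registry()
        .with(filter)
        .with(tracing_subscriber::fmt::layer())
        .init();

    tracing::debug!("not emitted while the filter is at info");

    // Later (e.g. from an admin HTTP endpoint) swap the filter in place.
    handle
        .reload(EnvFilter::new("debug"))
        .expect("reload log filter");
    tracing::debug!("now emitted at debug level");
}
```
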
zyy17
8453df1392 refactor: make init_global_logging() clean and add log_format (#4657)
refactor: refine the code logic of init_global_logging and add json output format
2024-09-04 03:04:51 +00:00
Ruihang Xia
8ca35a4a1a fix: use number of partitions as parallelism in region scanner (#4669)
* fix: use number of partitions as parallelism in region scanner

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add sqlness

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Co-authored-by: Lei HUANG <mrsatangel@gmail.com>

* order by ts

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* debug print time range

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Lei HUANG <mrsatangel@gmail.com>
2024-09-03 13:42:38 +00:00
Ruihang Xia
93f202694c refactor: remove unused error variants (#4666)
* add python script

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove unused errors

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix all negative cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* setup CI

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add license header

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-03 13:19:38 +00:00
Weny Xu
b52e3c694a chore(ci): set etcd resources limits (#4665) 2024-09-03 07:25:23 +00:00
dennis zhuang
a612b67470 feat: supports name in object storage config (#4630)
* feat: supports name in object storage config

* fix: integration test

* fix: integration test

* fix: update sample config

* fix: config api test
2024-09-03 07:02:55 +00:00
jeremyhi
9b03940e03 chore: refactor metadata key value trait (#4664) 2024-09-03 07:00:24 +00:00
jeremyhi
8d6cd8ae16 feat: export/import database (#4654)
* feat: export database create sql

* feat: import create database

* Update src/cmd/src/cli/export.rs

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* Update src/cmd/src/cli/import.rs

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* Update src/cmd/src/error.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* chore: make show create fail fast

---------

Co-authored-by: Weny Xu <wenymedia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-03 04:45:25 +00:00
dennis zhuang
8f4ec536de feat: grpc writing supports TTL hint (#4651) 2024-09-03 02:15:01 +00:00
zyy17
f0e2d6e663 fix: use 'target' for 'actions-rust-lang/setup-rust-toolchain' to fix cross build failed (#4661) 2024-09-02 06:11:12 +00:00
Weny Xu
306bd25c64 fix: expose missing options for initializing regions (#4660)
* fix: expose `init_regions_in_background` and `init_regions_parallelism` opts

* fix: ci
2024-09-02 03:11:18 +00:00
zyy17
ddafcc678c ci: disable macos integration test and some minor refactoring (#4658) 2024-09-02 03:06:17 +00:00
Weny Xu
2564b5daee fix: correct otlp endpoint formatting (#4646) 2024-09-02 02:59:50 +00:00
Lei, HUANG
37dcf34bb9 fix(mito): avoid caching empty batches in row group (#4652)
* fix: avoid caching empty batches in row group

* fix: clippy

* Update tests/cases/standalone/common/select/last_value.sql

* fix: sqlness
2024-09-02 02:43:00 +00:00
Yingwen
8eda36bfe3 feat: remove files from the write cache in purger (#4655)
* feat: remove files from the write cache in purger

* chore: fix typo
2024-08-31 04:19:52 +00:00
Ruihang Xia
68b59e0e5e feat: remove the requirement that partition column must be PK (#4647)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-31 03:16:01 +00:00
Ruihang Xia
a37aeb2814 feat: initialize partition range from ScanInput (#4635)
* feat: initialize partition range from ScanInput

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use num_rows instead

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add todo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* setup unordered scan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/mito2/src/read/scan_region.rs

Co-authored-by: jeremyhi <jiachun_feng@proton.me>

* leave unordered scan unchanged

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: jeremyhi <jiachun_feng@proton.me>
2024-08-30 07:30:37 +00:00
jeremyhi
f641c562c2 feat: show create database (#4642)
* feat: show create database

* feat: add sqlness test

* chore: reorder mod and use

* feat: show create schema

* Update src/frontend/src/instance.rs
2024-08-30 03:58:11 +00:00
Lanqing Yang
9286e963e7 chore: adding heartbeat sent/recv counts in greptimedb nodes (#4624)
obs: adding heartbeat sent/recv counts in greptimedb nodes
2024-08-30 03:57:16 +00:00
LFC
8ea4f67e4b refactor: reduce a object store "stat" call (#4645) 2024-08-30 03:31:19 +00:00
jeremyhi
5e4bac2633 feat: import cli tool (#4639)
* feat: import create tables

* feat: import database

* fix: export view schema
2024-08-29 09:32:21 +00:00
LFC
d45b04180c feat: pre-download the ingested sst (#4636)
* refactor: pre-read the ingested sst file in object store to fill the local cache to accelerate first query

* feat: pre-download the ingested SST from remote to accelerate following reads

* resolve PR comments

* resolve PR comments
2024-08-29 08:36:41 +00:00
discord9
8c8499ce53 perf(flow): Map&Reduce Operator use batch to reduce alloc (#4567)
* feat: partial impl mfp

* feat: eval batch inner

* chore: fmt

* feat: mfp eval_batch

* WIP

* feat: Collection generic over row&Batch

* feat: render source batch

* chore: chore

* feat: render mfp batch

* feat: render reduce batch(WIP)

* feat(WIP): render reduce

* feat: reduce batch

* feat: render sink batch

* feat: render constant batch

* chore: error handling& mfp batch test

* test: mfp batch

* chore: rm import

* test: render reduce batch

* chore: add TODO

* chore: per bot review

* refactor: per review

* chore: cmt

* chore: rename

* docs: update no panic
2024-08-29 07:28:13 +00:00
Weny Xu
79f40a762b fix: set selector_result_cache_size in unit test again (#4641) 2024-08-29 07:14:40 +00:00
jeremyhi
b062d8515d feat: copy database ignores view and temporary tables (#4640)
feat: copy database ignores view and temporary tables
2024-08-29 06:17:51 +00:00
discord9
9f9c1dab60 feat(flow): use DataFusion's optimizer (#4489)
* feat: use datafusion optimization

refactor: mv `sql_to_flow_plan` elsewhere

feat(WIP): use df optimization

WIP analyzer rule

feat(WIP): avg expander

fix: transform avg expander

fix: avg expand

feat: names from substrait

fix: avg rewrite

test: update `test_avg`&`test_avg_group_by`

test: fix `test_sum`

test: fix some tests

chore: remove unused flow plan transform

feat: tumble expander

test: update tests

* chore: clippy

* fix: tumble lose `group expr`

* test: sqlness test update

* test: rm unused cast

* test: simplify sqlness

* refactor: per review

* chore: after rebase

* fix: remove an outdated test

* test: add comment

* fix: report error when not literal

* chore: update sqlness test after rebase

* refactor: per review
2024-08-29 02:52:00 +00:00
dennis zhuang
841e66c810 fix: config api and export metrics default database (#4633) 2024-08-28 14:28:49 +00:00
shuiyisong
d1c635085c chore: modify grafana config to accord with version 9 (#4634)
chore: update grafana config to accord with version 9
2024-08-28 12:53:35 +00:00
Weny Xu
47657ebbc8 feat: replay WAL entries respect index (#4565)
* feat(log_store): use new `Consumer`

* feat: add `from_peer_id`

* feat: read WAL entries respect index

* test: add test for `build_region_wal_index_iterator`

* fix: keep the handle

* fix: incorrect last index

* fix: replay last entry id may be greater than expected

* chore: remove unused code

* chore: apply suggestions from CR

* chore: rename `datanode_id` to `location_id`

* chore: rename `from_peer_id` to `location_id`

* chore: rename `from_peer_id` to `location_id`

* chore: apply suggestions from CR
2024-08-28 11:37:18 +00:00
Ruihang Xia
64ae32def0 feat: remove some redundant clone/conversion on constructing MergeScan stream (#4632)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-28 08:52:09 +00:00
Weny Xu
744946957e fix: set selector_result_cache_size in unit test (#4631) 2024-08-28 07:24:17 +00:00
Ruihang Xia
d5455db2d5 fix: update properties on updating partitions (#4627)
* fix: update properties on updating partitions

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add unit test and handle insufficient ranges

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-28 07:06:01 +00:00
Yingwen
28bf549907 fix: fallback to window size in manifest (#4629) 2024-08-28 06:43:56 +00:00
zyy17
4ea412249a ci: add check-builder-rust-version job in release and change release-dev-builder-images trigger condition (#4615) 2024-08-27 16:59:01 +00:00
zyy17
eacc7bc471 refactor: add app in greptime_app_version metric (#4626)
refactor: add app in greptime_app_version metric
2024-08-27 11:19:01 +00:00
Ruihang Xia
b72d3bc71d build(deps): bump backon to 1.0 (#4625)
* build(deps): bump backon to 1.0

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-27 09:38:12 +00:00
Ning Sun
0b102ef846 ci: improve toolchain resolution in ci (#4614)
* ci: improve toolchain resolution in ci

* fix: yaml format
2024-08-27 07:46:51 +00:00
liyang
e404e9dafc chore: setting docker authentication in dev-build image (#4623) 2024-08-27 03:49:53 +00:00
liyang
63a442632e fix: failed to get version (#4622) 2024-08-26 15:33:30 +00:00
liyang
d39bafcfbd fix: change toolchain file name (#4621) 2024-08-26 13:04:06 +00:00
liyang
1717445ebe fix: failed to get github sha (#4620) 2024-08-26 11:42:07 +00:00
liyang
55d65da24d ci: add push dev-build images to aws ecr (#4618)
* ci: add push dev-build images to aws ecr

* chore: use toolchain file to generate the dev-build image tag

* chore: change dev-build version

* Update .github/workflows/release-dev-builder-images.yaml

Co-authored-by: zyy17 <zyylsxm@gmail.com>

---------

Co-authored-by: zyy17 <zyylsxm@gmail.com>
2024-08-26 09:36:55 +00:00
Weny Xu
3297d5f657 feat: allow skipping topic creation (#4616)
* feat: introduce `create_topics` opt

* feat: allow skipping topic creation

* chore: refine docs

* chore: apply suggestions from CR
2024-08-26 08:34:27 +00:00
Ning Sun
d6865911ee feat: add postgres response for transaction-related statements (#4562)
* feat: add postgres fixtures WIP

* feat: implement more postgres fixtures

* feat: add compatibility for transaction/set transaction/show transaction

* fix: improve regex for set transaction
2024-08-26 08:09:21 +00:00
dennis zhuang
63f2463273 feat!: impl admin command (#4600)
* feat: impl admin statement parser

* feat: introduce AsyncFunction and implements it for admin functions

* feat: execute admin functions

* fix: license header

* fix: panic in test

* chore: fixed by code review
2024-08-26 07:53:40 +00:00
Ruihang Xia
da337a9635 perf: accelerate scatter query (#4607)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-26 03:03:30 +00:00
fys
3973d6b01f chore: optimize common_version build (#4611) 2024-08-23 12:36:28 +00:00
discord9
2c731c76ad chore: add stats feature for jemalloc-ctl (#4610) 2024-08-23 11:18:30 +00:00
ozewr
40e7b58c80 feat: refactoring LruCacheLayer with list_with_metakey and concurrent_stat_in_list (#4596)
* use list_with_metakey and concurrent_stat_in_list

* change concurrent in recover_cache like before

* remove stat function

* use 8 concurrent

* use const value

* fmt code

* Apply suggestions from code review

---------

Co-authored-by: ozewr <l19ht@google.com>
Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-08-23 03:22:00 +00:00
zyy17
5177717f71 refactor: add fallback_to_local region option (#4578)
* refactor: add 'fallback_to_local_compaction' region option

* refactor: use 'fallback_to_local'
2024-08-23 03:09:43 +00:00
Weny Xu
8d61e6fe49 chore: bump rskafka to 75535b (#4608) 2024-08-23 03:05:52 +00:00
Ruihang Xia
a3b8d2fe8f chore: bump rust toolchain to 2024-08-21 (#4606)
* chore: bump rust toolchain to 2024-08-22

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update workflow

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* try 20240606

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-22 15:38:10 +00:00
Ning Sun
863ee073a9 chore: add commercial support section (#4601)
doc: add commercial support section
2024-08-22 12:03:20 +00:00
Weny Xu
25cd61b310 chore: upgrade toolchain to nightly-2024-08-07 (#4549)
* chore: upgrade toolchain to `nightly-2024-08-07`

* chore(ci): upgrade toolchain

* fix: fix unit test
2024-08-22 11:02:18 +00:00
fys
3517c13192 fix: incremental compilation always compile the common-version crate (#4605)
fix: wrong cargo:rerun
2024-08-22 11:00:33 +00:00
Ruihang Xia
b9cedf2c1a perf: optimize series divide algo (#4603)
* perf: optimize series divide algo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove dead code

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-22 09:16:36 +00:00
LFC
883c5bc5b0 refactor: skip checking the existence of the SST files (#4602)
refactor: skip checking the existence of the SST files when region is directly edited
2024-08-22 08:32:27 +00:00
Yingwen
d628079f4c feat: collect filters metrics for scanners (#4591)
* feat: collect filter metrics

* refactor: reuse ReaderFilterMetrics

* feat: record read rows from parquet by type

* feat: unordered scan observe rows

also fix read type

* chore: rename label
2024-08-22 03:22:05 +00:00
Weny Xu
0025fa6ec7 chore: bump opendal version to 0.49 (#4587)
* chore: bump opendal version to 0.49

* chore: apply suggestions from CR

* Update src/object-store/src/util.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2024-08-22 03:05:36 +00:00
Lanqing Yang
ff04109ee6 docs: add example configs introduced by pg_kvbackend (#4573)
chore: add example configs that were introduced after pg_kvbackend
2024-08-22 01:52:02 +00:00
Yingwen
9c1704d4cb docs: move v0.9.1 benchmark report to tsbs dir (#4598)
* docs: move v0.9.1 benchmark report to tsbs dir

* docs: add newlines
2024-08-21 09:31:05 +00:00
Yingwen
a12a905578 chore: disable ttl for write cache by default (#4595)
* chore: remove default write cache ttl

* docs: update example config

* chore: fix ci
2024-08-21 08:38:38 +00:00
shuiyisong
449236360d docs: log benchmark (#4597)
* chore: add log benchmark stuff

* chore: minor update
2024-08-21 07:12:32 +00:00
localhost
bf16422cee fix: pipeline prepare loop break detects a conditional error (#4593) 2024-08-21 06:20:09 +00:00
Ran Joe
9db08dbbe0 refactor(mito2): reduce duplicate IndexOutput struct (#4592)
* refactor(mito2): reduce duplicate IndexOutput struct

* docs(mito2): add index output note
2024-08-20 12:30:17 +00:00
fys
9d885fa0c2 chore: bump tikv-jemalloc* to "0.6" (#4590)
chore: bump tikv-jemalloc* to "0.6"
2024-08-20 09:08:21 +00:00
Ruihang Xia
b25a2b117e feat: remove sql in error desc (#4589)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-20 06:37:30 +00:00
fys
6fccff4810 chore: keep symbol table in nightly profile (#4588)
chore: keep symbol table in nightly profile
2024-08-20 02:27:31 +00:00
ozewr
30af78700f feat: Implement the Buf to avoid extra memory allocation (#4585)
* feat: Implement the Buf to avoid extra memory allocation

* fmt toml

* fmt code

* mv entry.into_buffer to raw_entry_buffer

* less reuse opendal

* remove todo #4065

* Update src/mito2/src/wal/entry_reader.rs

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* fmt code

---------

Co-authored-by: ozewr <l19ht@google.com>
Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-08-19 12:11:08 +00:00
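
For context on the `Buf` approach: implementing `bytes::Buf` on an owned buffer lets readers consume it through `chunk()`/`advance()` instead of copying the bytes into a fresh allocation. A minimal illustration of the pattern; `EntryBuffer` is a hypothetical stand-in, not the actual WAL entry type touched by #4585:

```rust
use bytes::Buf;

/// Hypothetical owned buffer that yields its bytes without re-allocating.
struct EntryBuffer {
    data: Vec<u8>,
    pos: usize,
}

impl Buf for EntryBuffer {
    fn remaining(&self) -> usize {
        self.data.len() - self.pos
    }

    fn chunk(&self) -> &[u8] {
        &self.data[self.pos..]
    }

    fn advance(&mut self, cnt: usize) {
        assert!(cnt <= self.remaining(), "advance past end of buffer");
        self.pos += cnt;
    }
}

fn main() {
    let mut buf = EntryBuffer { data: vec![1, 2, 3, 4], pos: 0 };
    // Consumers read through the Buf trait with no extra copy.
    let first = buf.get_u16(); // provided method built on chunk()/advance()
    assert_eq!(first, u16::from_be_bytes([1, 2]));
    assert_eq!(buf.remaining(), 2);
}
```
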
Ruihang Xia
8de11a0e34 perf: set simple filter on primary key columns to exact filter (#4564)
* perf: set simple filter on primary key columns to exact filter

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add sqlness test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix sqlness

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-19 09:07:35 +00:00
333 changed files with 8683 additions and 4046 deletions

View File

@@ -17,6 +17,12 @@ inputs:
description: Enable dev mode, only build standard greptime
required: false
default: "false"
image-namespace:
description: Image Namespace
required: true
image-registry:
description: Image Registry
required: true
working-dir:
description: Working directory to build the artifacts
required: false
@@ -31,8 +37,8 @@ runs:
run: |
cd ${{ inputs.working-dir }} && \
make run-it-in-container BUILD_JOBS=4 \
IMAGE_NAMESPACE=i8k6a5e1/greptime \
IMAGE_REGISTRY=public.ecr.aws
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload sqlness logs
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
@@ -51,8 +57,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Build greptime without pyo3
if: ${{ inputs.dev-mode == 'false' }}
@@ -64,8 +70,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
shell: bash
@@ -82,8 +88,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary
@@ -94,5 +100,5 @@ runs:
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
build-android-artifacts: true
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}

View File

@@ -4,9 +4,6 @@ inputs:
arch:
description: Architecture to build
required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile:
description: Cargo profile to build
required: true
@@ -43,10 +40,9 @@ runs:
brew install protobuf
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}
target: ${{ inputs.arch }}
- name: Start etcd # For integration tests.
if: ${{ inputs.disable-run-tests == 'false' }}

View File

@@ -4,9 +4,6 @@ inputs:
arch:
description: Architecture to build
required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile:
description: Cargo profile to build
required: true
@@ -28,10 +25,9 @@ runs:
- uses: arduino/setup-protoc@v3
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}
target: ${{ inputs.arch }}
components: llvm-tools-preview
- name: Rust Cache

View File

@@ -18,6 +18,8 @@ runs:
--set replicaCount=${{ inputs.etcd-replicas }} \
--set resources.requests.cpu=50m \
--set resources.requests.memory=128Mi \
--set resources.limits.cpu=1000m \
--set resources.limits.memory=2Gi \
--set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \
--set persistence.size=2Gi \

View File

@@ -38,7 +38,7 @@ runs:
steps:
- name: Configure AWS credentials
if: startsWith(inputs.runner, 'ec2')
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

View File

@@ -25,7 +25,7 @@ runs:
steps:
- name: Configure AWS credentials
if: ${{ inputs.label && inputs.ec2-instance-id }}
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

View File

@@ -12,9 +12,6 @@ on:
name: Build API docs
env:
RUST_TOOLCHAIN: nightly-2024-04-20
jobs:
apidoc:
runs-on: ubuntu-20.04
@@ -23,9 +20,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- run: cargo doc --workspace --no-deps --document-private-items
- run: |
cat <<EOF > target/doc/index.html

View File

@@ -177,6 +177,8 @@ jobs:
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
dev-mode: true # Only build the standard greptime binary.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
@@ -206,6 +208,8 @@ jobs:
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
dev-mode: true # Only build the standard greptime binary.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
release-images-to-dockerhub:
name: Build and push images to DockerHub

View File

@@ -29,9 +29,6 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
env:
RUST_TOOLCHAIN: nightly-2024-04-20
jobs:
check-typos-and-docs:
name: Check typos and docs
@@ -64,9 +61,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -82,9 +77,7 @@ jobs:
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: stable
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -107,9 +100,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
@@ -161,9 +152,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -181,7 +170,7 @@ jobs:
name: bins
path: .
- name: Unzip binaries
run: |
run: |
tar -xvf ./bins.tar.gz
rm ./bins.tar.gz
- name: Run GreptimeDB
@@ -221,9 +210,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -274,9 +261,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
@@ -287,7 +272,7 @@ jobs:
- name: Build greptime bianry
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime
run: cargo gc --profile ci -- --bin greptime
- name: Pack greptime binary
shell: bash
run: |
@@ -301,7 +286,7 @@ jobs:
artifacts-dir: bin
version: current
distributed-fuzztest:
distributed-fuzztest:
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
@@ -344,9 +329,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -416,12 +399,12 @@ jobs:
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
@@ -433,13 +416,13 @@ jobs:
- name: Delete cluster
if: success()
shell: bash
run: |
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f
distributed-fuzztest-with-chaos:
distributed-fuzztest-with-chaos:
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
@@ -447,7 +430,7 @@ jobs:
strategy:
matrix:
target: ["fuzz_migrate_mito_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
mode:
mode:
- name: "Remote WAL"
minio: true
kafka: true
@@ -484,9 +467,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -557,12 +538,12 @@ jobs:
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
@@ -574,7 +555,7 @@ jobs:
- name: Delete cluster
if: success()
shell: bash
run: |
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
@@ -627,17 +608,16 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
- uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: rustfmt
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-rust-fmt"
- name: Run cargo fmt
run: cargo fmt --all -- --check
- name: Check format
run: make fmt-check
clippy:
name: Clippy
@@ -648,9 +628,8 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
- uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: clippy
- name: Rust Cache
uses: Swatinem/rust-cache@v2
@@ -674,9 +653,8 @@ jobs:
with:
version: "14.0"
- name: Install toolchain
uses: dtolnay/rust-toolchain@master
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2

View File

@@ -154,6 +154,8 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
@@ -173,6 +175,8 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
release-images-to-dockerhub:
name: Build and push images to DockerHub

View File

@@ -9,9 +9,6 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
env:
RUST_TOOLCHAIN: nightly-2024-04-20
permissions:
issues: write
@@ -52,9 +49,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run sqlness
@@ -85,9 +80,8 @@ jobs:
with:
version: "14.0"
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@master
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2

View File

@@ -1,12 +1,14 @@
name: Release dev-builder images
on:
push:
branches:
- main
paths:
- rust-toolchain.toml
- 'docker/dev-builder/**'
workflow_dispatch: # Allows you to run this workflow manually.
inputs:
version:
description: Version of the dev-builder
required: false
default: latest
release_dev_builder_ubuntu_image:
type: boolean
description: Release dev-builder-ubuntu image
@@ -28,22 +30,103 @@ jobs:
name: Release dev builder images
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
runs-on: ubuntu-20.04-16-cores
outputs:
version: ${{ steps.set-version.outputs.version }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Configure build image version
id: set-version
shell: bash
run: |
commitShortSHA=`echo ${{ github.sha }} | cut -c1-8`
buildTime=`date +%Y%m%d%H%M%S`
BUILD_VERSION="$commitShortSHA-$buildTime"
RUST_TOOLCHAIN_VERSION=$(cat rust-toolchain.toml | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
IMAGE_VERSION="${RUST_TOOLCHAIN_VERSION}-${BUILD_VERSION}"
echo "VERSION=${IMAGE_VERSION}" >> $GITHUB_ENV
echo "version=$IMAGE_VERSION" >> $GITHUB_OUTPUT
- name: Build and push dev builder images
uses: ./.github/actions/build-dev-builder-images
with:
version: ${{ inputs.version }}
version: ${{ env.VERSION }}
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
release-dev-builder-images-ecr:
name: Release dev builder images to AWS ECR
runs-on: ubuntu-20.04
needs: [
release-dev-builder-images
]
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ECR_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_ECR_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.ECR_REGION }}
- name: Login to Amazon ECR
id: login-ecr-public
uses: aws-actions/amazon-ecr-login@v2
env:
AWS_REGION: ${{ vars.ECR_REGION }}
with:
registry-type: public
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:latest
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:latest
release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
name: Release dev builder images to CN region
runs-on: ubuntu-20.04
@@ -51,35 +134,39 @@ jobs:
release-dev-builder-images
]
steps:
- name: Login to AliCloud Container Registry
uses: docker/login-action@v3
with:
registry: ${{ vars.ACR_IMAGE_REGISTRY }}
username: ${{ secrets.ALICLOUD_USERNAME }}
password: ${{ secrets.ALICLOUD_PASSWORD }}
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: |
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: |
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: |
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}

View File

@@ -33,6 +33,7 @@ on:
description: The runner uses to build linux-arm64 artifacts
default: ec2-c6g.4xlarge-arm64
options:
- ubuntu-2204-32-cores-arm
- ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G
- ec2-c6g.4xlarge-arm64 # 16C32G
@@ -82,7 +83,6 @@ on:
# Use env variables to control all the release process.
env:
# The arguments of building greptime.
RUST_TOOLCHAIN: nightly-2024-04-20
CARGO_PROFILE: nightly
# Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -123,6 +123,11 @@ jobs:
with:
fetch-depth: 0
- name: Check Rust toolchain version
shell: bash
run: |
./scripts/check-builder-rust-version.sh
# The create-version will create a global variable named 'version' in the global workflows.
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
@@ -183,6 +188,8 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
@@ -202,6 +209,8 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-macos-artifacts:
name: Build macOS artifacts
@@ -240,11 +249,11 @@ jobs:
- uses: ./.github/actions/build-macos-artifacts
with:
arch: ${{ matrix.arch }}
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
cargo-profile: ${{ env.CARGO_PROFILE }}
features: ${{ matrix.features }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
# We decide to disable the integration tests on macOS because it's unnecessary and time-consuming.
disable-run-tests: true
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
- name: Set build macos result
@@ -283,7 +292,6 @@ jobs:
- uses: ./.github/actions/build-windows-artifacts
with:
arch: ${{ matrix.arch }}
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
cargo-profile: ${{ env.CARGO_PROFILE }}
features: ${{ matrix.features }}
version: ${{ needs.allocate-runners.outputs.version }}

241
Cargo.lock generated
View File

@@ -214,7 +214,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
[[package]]
name = "api"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"common-base",
"common-decimal",
@@ -762,7 +762,7 @@ dependencies = [
[[package]]
name = "auth"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -897,6 +897,15 @@ dependencies = [
"tokio",
]
[[package]]
name = "backon"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2caf634d05fe0642d0fb1ab43497fa627088ecd93f84b2d0f2a5d7b91f7730db"
dependencies = [
"fastrand",
]
[[package]]
name = "backtrace"
version = "0.3.73"
@@ -1277,7 +1286,7 @@ dependencies = [
[[package]]
name = "cache"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"catalog",
"common-error",
@@ -1285,7 +1294,7 @@ dependencies = [
"common-meta",
"moka",
"snafu 0.8.4",
"substrait 0.9.2",
"substrait 0.9.3",
]
[[package]]
@@ -1312,7 +1321,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "catalog"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arrow",
@@ -1638,7 +1647,7 @@ checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70"
[[package]]
name = "client"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arc-swap",
@@ -1668,7 +1677,7 @@ dependencies = [
"serde_json",
"snafu 0.8.4",
"substrait 0.37.3",
"substrait 0.9.2",
"substrait 0.9.3",
"tokio",
"tokio-stream",
"tonic 0.11.0",
@@ -1698,7 +1707,7 @@ dependencies = [
[[package]]
name = "cmd"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-trait",
"auth",
@@ -1754,7 +1763,7 @@ dependencies = [
"session",
"snafu 0.8.4",
"store-api",
"substrait 0.9.2",
"substrait 0.9.3",
"table",
"temp-env",
"tempfile",
@@ -1800,7 +1809,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]]
name = "common-base"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"anymap",
"bitvec",
@@ -1816,7 +1825,7 @@ dependencies = [
[[package]]
name = "common-catalog"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"chrono",
"common-error",
@@ -1827,7 +1836,7 @@ dependencies = [
[[package]]
name = "common-config"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"common-base",
"common-error",
@@ -1850,7 +1859,7 @@ dependencies = [
[[package]]
name = "common-datasource"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"arrow",
"arrow-schema",
@@ -1887,7 +1896,7 @@ dependencies = [
[[package]]
name = "common-decimal"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"bigdecimal",
"common-error",
@@ -1900,7 +1909,7 @@ dependencies = [
[[package]]
name = "common-error"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"snafu 0.8.4",
"strum 0.25.0",
@@ -1909,7 +1918,7 @@ dependencies = [
[[package]]
name = "common-frontend"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -1924,7 +1933,7 @@ dependencies = [
[[package]]
name = "common-function"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arc-swap",
@@ -1941,6 +1950,8 @@ dependencies = [
"common-version",
"datafusion",
"datatypes",
"geohash",
"h3o",
"num",
"num-traits",
"once_cell",
@@ -1954,11 +1965,12 @@ dependencies = [
"statrs",
"store-api",
"table",
"tokio",
]
[[package]]
name = "common-greptimedb-telemetry"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-trait",
"common-runtime",
@@ -1975,7 +1987,7 @@ dependencies = [
[[package]]
name = "common-grpc"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arrow-flight",
@@ -2001,7 +2013,7 @@ dependencies = [
[[package]]
name = "common-grpc-expr"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"common-base",
@@ -2019,7 +2031,7 @@ dependencies = [
[[package]]
name = "common-macro"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"arc-swap",
"common-query",
@@ -2033,7 +2045,7 @@ dependencies = [
[[package]]
name = "common-mem-prof"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"common-error",
"common-macro",
@@ -2046,7 +2058,7 @@ dependencies = [
[[package]]
name = "common-meta"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"anymap2",
"api",
@@ -2102,15 +2114,15 @@ dependencies = [
[[package]]
name = "common-plugins"
version = "0.9.2"
version = "0.9.3"
[[package]]
name = "common-procedure"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-stream",
"async-trait",
"backon",
"backon 1.0.2",
"common-base",
"common-error",
"common-macro",
@@ -2132,7 +2144,7 @@ dependencies = [
[[package]]
name = "common-procedure-test"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-trait",
"common-procedure",
@@ -2140,7 +2152,7 @@ dependencies = [
[[package]]
name = "common-query"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -2166,7 +2178,7 @@ dependencies = [
[[package]]
name = "common-recordbatch"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"arc-swap",
"common-error",
@@ -2185,7 +2197,7 @@ dependencies = [
[[package]]
name = "common-runtime"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-trait",
"common-error",
@@ -2207,7 +2219,7 @@ dependencies = [
[[package]]
name = "common-telemetry"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"atty",
"backtrace",
@@ -2234,7 +2246,7 @@ dependencies = [
[[package]]
name = "common-test-util"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"client",
"common-query",
@@ -2246,7 +2258,7 @@ dependencies = [
[[package]]
name = "common-time"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"arrow",
"chrono",
@@ -2262,7 +2274,7 @@ dependencies = [
[[package]]
name = "common-version"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"build-data",
"const_format",
@@ -2273,7 +2285,7 @@ dependencies = [
[[package]]
name = "common-wal"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"common-base",
"common-error",
@@ -3081,7 +3093,7 @@ dependencies = [
[[package]]
name = "datanode"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arrow-flight",
@@ -3130,7 +3142,7 @@ dependencies = [
"session",
"snafu 0.8.4",
"store-api",
"substrait 0.9.2",
"substrait 0.9.3",
"table",
"tokio",
"toml 0.8.14",
@@ -3139,7 +3151,7 @@ dependencies = [
[[package]]
name = "datatypes"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"arrow",
"arrow-array",
@@ -3709,7 +3721,7 @@ dependencies = [
[[package]]
name = "file-engine"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -3803,9 +3815,15 @@ dependencies = [
"num-traits",
]
[[package]]
name = "float_eq"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28a80e3145d8ad11ba0995949bbcf48b9df2be62772b3d351ef017dff6ecb853"
[[package]]
name = "flow"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arrow",
@@ -3862,7 +3880,7 @@ dependencies = [
"snafu 0.8.4",
"store-api",
"strum 0.25.0",
"substrait 0.9.2",
"substrait 0.9.3",
"table",
"tokio",
"tonic 0.11.0",
@@ -3909,7 +3927,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
[[package]]
name = "frontend"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arc-swap",
@@ -4201,6 +4219,27 @@ dependencies = [
"version_check",
]
[[package]]
name = "geo-types"
version = "0.7.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ff16065e5720f376fbced200a5ae0f47ace85fd70b7e54269790281353b6d61"
dependencies = [
"approx",
"num-traits",
"serde",
]
[[package]]
name = "geohash"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fb94b1a65401d6cbf22958a9040aa364812c26674f841bee538b12c135db1e6"
dependencies = [
"geo-types",
"libm",
]
[[package]]
name = "gethostname"
version = "0.2.3"
@@ -4291,6 +4330,25 @@ dependencies = [
"tracing",
]
[[package]]
name = "h3o"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0de3592e1f699692aa0525c42ff7879ec3ee7e36329af20967bc910a1cdc39c7"
dependencies = [
"ahash 0.8.11",
"either",
"float_eq",
"h3o-bit",
"libm",
]
[[package]]
name = "h3o-bit"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fb45e8060378c0353781abf67e1917b545a6b710d0342d85b70c125af7ef320"
[[package]]
name = "half"
version = "1.8.3"
@@ -4568,9 +4626,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
[[package]]
name = "human-panic"
version = "1.2.3"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4f016c89920bbb30951a8405ecacbb4540db5524313b9445736e7e1855cf370"
checksum = "1c5a08ed290eac04006e21e63d32e90086b6182c7cd0452d10f4264def1fec9a"
dependencies = [
"anstream",
"anstyle",
@@ -5020,7 +5078,7 @@ dependencies = [
[[package]]
name = "index"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-trait",
"asynchronous-codec",
@@ -5800,7 +5858,7 @@ checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
[[package]]
name = "log-store"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-stream",
"async-trait",
@@ -5816,6 +5874,7 @@ dependencies = [
"common-time",
"common-wal",
"delta-encoding",
"derive_builder 0.12.0",
"futures",
"futures-util",
"itertools 0.10.5",
@@ -6111,7 +6170,7 @@ dependencies = [
[[package]]
name = "meta-client"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -6137,7 +6196,7 @@ dependencies = [
[[package]]
name = "meta-srv"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -6215,7 +6274,7 @@ dependencies = [
[[package]]
name = "metric-engine"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"aquamarine",
@@ -6306,7 +6365,7 @@ dependencies = [
[[package]]
name = "mito2"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"aquamarine",
@@ -6953,7 +7012,7 @@ dependencies = [
[[package]]
name = "object-store"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"anyhow",
"bytes",
@@ -7010,13 +7069,13 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
[[package]]
name = "opendal"
version = "0.48.0"
version = "0.49.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "615d41187deea0ea7fab5b48e9afef6ae8fc742fdcfa248846ee3d92ff71e986"
checksum = "ba615070686c8781ce97376fdafca29d7c42f47b31d2230d7c8c1642ec823950"
dependencies = [
"anyhow",
"async-trait",
"backon",
"backon 0.4.4",
"base64 0.22.1",
"bytes",
"chrono",
@@ -7200,7 +7259,7 @@ dependencies = [
[[package]]
name = "operator"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -7245,7 +7304,7 @@ dependencies = [
"sql",
"sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
"store-api",
"substrait 0.9.2",
"substrait 0.9.3",
"table",
"tokio",
"tokio-util",
@@ -7495,7 +7554,7 @@ dependencies = [
[[package]]
name = "partition"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -7784,7 +7843,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "pipeline"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"ahash 0.8.11",
"api",
@@ -7945,7 +8004,7 @@ dependencies = [
[[package]]
name = "plugins"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"auth",
"common-base",
@@ -8214,7 +8273,7 @@ dependencies = [
[[package]]
name = "promql"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"ahash 0.8.11",
"async-trait",
@@ -8449,7 +8508,7 @@ dependencies = [
[[package]]
name = "puffin"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-compression 0.4.11",
"async-trait",
@@ -8571,7 +8630,7 @@ dependencies = [
[[package]]
name = "query"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"ahash 0.8.11",
"api",
@@ -8634,7 +8693,7 @@ dependencies = [
"stats-cli",
"store-api",
"streaming-stats",
"substrait 0.9.2",
"substrait 0.9.3",
"table",
"tokio",
"tokio-stream",
@@ -9170,8 +9229,9 @@ dependencies = [
[[package]]
name = "rsasl"
version = "2.0.2"
source = "git+https://github.com/wenyxu/rsasl.git?rev=06ebb683d5539c3410de4ce9fa37ff9b97e790a4#06ebb683d5539c3410de4ce9fa37ff9b97e790a4"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45035615cdd68c71daac89aef75b130d4b2cad29599966e1b4671f8fbb463559"
dependencies = [
"base64 0.22.1",
"core2",
@@ -9188,9 +9248,8 @@ dependencies = [
[[package]]
name = "rskafka"
version = "0.5.0"
source = "git+https://github.com/WenyXu/rskafka.git?rev=940c6030012c5b746fad819fb72e3325b26e39de#940c6030012c5b746fad819fb72e3325b26e39de"
source = "git+https://github.com/influxdata/rskafka.git?rev=75535b5ad9bae4a5dbb582c82e44dfd81ec10105#75535b5ad9bae4a5dbb582c82e44dfd81ec10105"
dependencies = [
"async-trait",
"bytes",
"chrono",
"crc32c",
@@ -9199,7 +9258,6 @@ dependencies = [
"integer-encoding 4.0.0",
"lz4",
"parking_lot 0.12.3",
"pin-project-lite",
"rand",
"rsasl",
"rustls 0.23.10",
@@ -9997,7 +10055,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "script"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arc-swap",
@@ -10291,7 +10349,7 @@ dependencies = [
[[package]]
name = "servers"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"aide",
"api",
@@ -10397,7 +10455,7 @@ dependencies = [
[[package]]
name = "session"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arc-swap",
@@ -10698,7 +10756,7 @@ dependencies = [
[[package]]
name = "sql"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"chrono",
@@ -10758,7 +10816,7 @@ dependencies = [
[[package]]
name = "sqlness-runner"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-trait",
"clap 4.5.7",
@@ -10975,7 +11033,7 @@ dependencies = [
[[package]]
name = "store-api"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"aquamarine",
@@ -11144,7 +11202,7 @@ dependencies = [
[[package]]
name = "substrait"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-trait",
"bytes",
@@ -11345,7 +11403,7 @@ dependencies = [
[[package]]
name = "table"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -11610,7 +11668,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
[[package]]
name = "tests-fuzz"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"arbitrary",
"async-trait",
@@ -11652,7 +11710,7 @@ dependencies = [
[[package]]
name = "tests-integration"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arrow-flight",
@@ -11712,7 +11770,7 @@ dependencies = [
"sql",
"sqlx",
"store-api",
"substrait 0.9.2",
"substrait 0.9.3",
"table",
"tempfile",
"time",
@@ -11800,9 +11858,9 @@ dependencies = [
[[package]]
name = "tikv-jemalloc-ctl"
version = "0.5.4"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "619bfed27d807b54f7f776b9430d4f8060e66ee138a28632ca898584d462c31c"
checksum = "f21f216790c8df74ce3ab25b534e0718da5a1916719771d3fec23315c99e468b"
dependencies = [
"libc",
"paste",
@@ -11811,9 +11869,9 @@ dependencies = [
[[package]]
name = "tikv-jemalloc-sys"
version = "0.5.4+5.3.0-patched"
version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9402443cb8fd499b6f327e40565234ff34dbda27460c5b47db0db77443dd85d1"
checksum = "cd3c60906412afa9c2b5b5a48ca6a5abe5736aec9eb48ad05037a677e52e4e2d"
dependencies = [
"cc",
"libc",
@@ -11821,9 +11879,9 @@ dependencies = [
[[package]]
name = "tikv-jemallocator"
version = "0.5.4"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "965fe0c26be5c56c94e38ba547249074803efd52adfb66de62107d95aab3eaca"
checksum = "4cec5ff18518d81584f477e9bfdf957f5bb0979b0bac3af4ca30b5b3ae2d2865"
dependencies = [
"libc",
"tikv-jemalloc-sys",
@@ -12431,6 +12489,16 @@ dependencies = [
"web-time 0.2.4",
]
[[package]]
name = "tracing-serde"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1"
dependencies = [
"serde",
"tracing-core",
]
[[package]]
name = "tracing-subscriber"
version = "0.3.18"
@@ -12441,12 +12509,15 @@ dependencies = [
"nu-ansi-term",
"once_cell",
"regex",
"serde",
"serde_json",
"sharded-slab",
"smallvec",
"thread_local",
"tracing",
"tracing-core",
"tracing-log 0.2.0",
"tracing-serde",
]
[[package]]

View File

@@ -64,7 +64,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.9.2"
version = "0.9.3"
edition = "2021"
license = "Apache-2.0"
@@ -77,6 +77,7 @@ clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
# Remove this after https://github.com/PyO3/pyo3/issues/4094
rust.non_local_definitions = "allow"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
[workspace.dependencies]
# We turn off default-features for some dependencies here so the workspaces which inherit them can
@@ -151,8 +152,7 @@ reqwest = { version = "0.12", default-features = false, features = [
"stream",
"multipart",
] }
# SCRAM-SHA-512 requires https://github.com/dequbed/rsasl/pull/48, https://github.com/influxdata/rskafka/pull/247
rskafka = { git = "https://github.com/WenyXu/rskafka.git", rev = "940c6030012c5b746fad819fb72e3325b26e39de", features = [
rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "75535b5ad9bae4a5dbb582c82e44dfd81ec10105", features = [
"transport-tls",
] }
rstest = "0.21"
@@ -251,7 +251,7 @@ debug = 1
[profile.nightly]
inherits = "release"
strip = true
strip = "debuginfo"
lto = "thin"
debug = false
incremental = false

View File

@@ -8,6 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2024-06-06-b4b105ad-20240827021230
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu
@@ -77,7 +78,7 @@ build: ## Build debug version greptime.
build-by-dev-builder: ## Build greptime by dev-builder.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
make build \
CARGO_EXTENSION="${CARGO_EXTENSION}" \
CARGO_PROFILE=${CARGO_PROFILE} \
@@ -91,7 +92,7 @@ build-by-dev-builder: ## Build greptime by dev-builder.
build-android-bin: ## Build greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
make build \
CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \
CARGO_PROFILE=release \
@@ -105,8 +106,8 @@ build-android-bin: ## Build greptime binary for android.
strip-android-bin: build-android-bin ## Strip greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip /greptimedb/target/aarch64-linux-android/release/greptime'
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip --strip-debug /greptimedb/target/aarch64-linux-android/release/greptime'
.PHONY: clean
clean: ## Clean the project.
@@ -145,7 +146,7 @@ dev-builder: multi-platform-buildx ## Build dev-builder image.
docker buildx build --builder ${BUILDX_BUILDER_NAME} \
--build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
-f docker/dev-builder/${BASE_IMAGE}/Dockerfile \
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
.PHONY: multi-platform-buildx
multi-platform-buildx: ## Create buildx multi-platform builder.
@@ -190,6 +191,7 @@ fix-clippy: ## Fix clippy violations.
.PHONY: fmt-check
fmt-check: ## Check code format.
cargo fmt --all -- --check
python3 scripts/check-snafu.py
.PHONY: start-etcd
start-etcd: ## Start single node etcd for testing purpose.
@@ -203,7 +205,7 @@ stop-etcd: ## Stop single node etcd for testing purpose.
run-it-in-container: start-etcd ## Run integration tests in dev-builder.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS}
.PHONY: start-cluster

View File

@@ -150,7 +150,7 @@ Our official Grafana dashboard is available at [grafana](grafana/README.md) dire
## Project Status
The current version has not yet reached the standards for General Availability.
The current version has not yet reached the standards for General Availability.
According to our Greptime 2024 Roadmap, we aim to achieve a production-level version with the release of v1.0 by the end of 2024. [Join Us](https://github.com/GreptimeTeam/greptimedb/issues/3412)
We welcome you to test and use GreptimeDB. Some users have already adopted it in their production environments. If you're interested in trying it out, please use the latest stable release available.
@@ -172,6 +172,13 @@ In addition, you may:
- Connect us with [Linkedin](https://www.linkedin.com/company/greptime/)
- Follow us on [Twitter](https://twitter.com/greptime)
## Commercial Support
If you are running GreptimeDB OSS in your organization, we offer additional
enterprise add-ons, installation services, training and consulting. [Contact
us](https://greptime.com/contactus) and we will reach out to you with more
details of our commercial license.
## License
GreptimeDB uses the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt) to strike a balance between

View File

@@ -15,6 +15,8 @@
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `default_timezone` | String | `None` | The default timezone of the server. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
@@ -67,9 +69,10 @@
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.num_topics` | Integer | `64` | Number of topics to be created upon start.<br/>**It's only used when the provider is `kafka`**. |
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics.<br/>**It's only used when the provider is `kafka`**. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>**It's only used when the provider is `kafka`**. |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.<br/>**It's only used when the provider is `kafka`**. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
@@ -116,15 +119,17 @@
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | `512MB` | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | `None` | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
@@ -148,20 +153,21 @@
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
@@ -233,20 +239,21 @@
| `datanode.client.connect_timeout` | String | `10s` | -- |
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
@@ -259,12 +266,13 @@
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
| `store_addr` | String | `127.0.0.1:2379` | Etcd server address. |
| `store_addr` | String | `127.0.0.1:2379` | Store server address, defaults to the etcd store. |
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
| `backend` | String | `EtcdStore` | The datastore for meta server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
@@ -285,9 +293,10 @@
| `wal` | -- | -- | -- |
| `wal.provider` | String | `raft_engine` | -- |
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |
| `wal.num_topics` | Integer | `64` | Number of topics to be created upon start. |
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`. |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled. |
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
@@ -295,20 +304,21 @@
| `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
| `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
@@ -329,6 +339,10 @@
| `rpc_runtime_size` | Integer | `None` | Deprecated, use `grpc.runtime_size` instead. |
| `rpc_max_recv_message_size` | String | `None` | Deprecated, use `grpc.rpc_max_recv_message_size` instead. |
| `rpc_max_send_message_size` | String | `None` | Deprecated, use `grpc.rpc_max_send_message_size` instead. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
@@ -408,15 +422,17 @@
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | `512MB` | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | `None` | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
@@ -438,20 +454,21 @@
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
@@ -483,11 +500,12 @@
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |

View File

@@ -39,6 +39,18 @@ rpc_max_recv_message_size = "512MB"
## +toml2docs:none-default
rpc_max_send_message_size = "512MB"
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
@@ -324,7 +336,7 @@ credential_path = "test"
## The credential of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## +toml2docs:none-default
credential= "base64-credential"
credential = "base64-credential"
## The container of the azure account.
## **It's only used when the storage type is `Azblob`**.
@@ -348,9 +360,23 @@ region = "us-west-2"
# Custom storage options
# [[storage.providers]]
# name = "S3"
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# [[storage.providers]]
# name = "Gcs"
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The region engine options. You can configure multiple region engines.
[[region_engine]]
@@ -394,9 +420,13 @@ sst_meta_cache_size = "128MB"
vector_cache_size = "512MB"
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## If not set, it's default to 1/8 of OS memory.
page_cache_size = "512MB"
## Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
selector_result_cache_size = "512MB"
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
@@ -407,7 +437,8 @@ experimental_write_cache_path = ""
experimental_write_cache_size = "512MB"
## TTL for write cache.
experimental_write_cache_ttl = "1h"
## +toml2docs:none-default
experimental_write_cache_ttl = "8h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
@@ -424,6 +455,10 @@ parallel_scan_channel_size = 32
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
## The options for index in Mito engine.
[region_engine.mito.index]
@@ -514,7 +549,7 @@ fork_dictionary_bytes = "1GiB"
## The logging options.
[logging]
## The directory to store the log files.
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
@@ -530,6 +565,9 @@ otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
@@ -547,12 +585,13 @@ enable = false
write_interval = "30s"
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## +toml2docs:none-default
db = "information_schema"
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.

View File

@@ -59,7 +59,7 @@ retry_interval = "3s"
## The logging options.
[logging]
## The directory to store the log files.
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
@@ -75,6 +75,9 @@ otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0

View File

@@ -166,7 +166,7 @@ tcp_nodelay = true
## The logging options.
[logging]
## The directory to store the log files.
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
@@ -182,6 +182,9 @@ otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
@@ -199,12 +202,13 @@ enable = false
write_interval = "30s"
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## +toml2docs:none-default
db = "information_schema"
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.

View File

@@ -7,7 +7,7 @@ bind_addr = "127.0.0.1:3002"
## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
server_addr = "127.0.0.1:3002"
## Etcd server address.
## Store server address, defaults to the etcd store.
store_addr = "127.0.0.1:2379"
## Datanode selector type.
@@ -32,6 +32,9 @@ store_key_prefix = ""
## - Using shared storage (e.g., s3).
enable_region_failover = false
## The datastore for meta server.
backend = "EtcdStore"
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
@@ -96,7 +99,12 @@ provider = "raft_engine"
## The broker endpoints of the Kafka cluster.
broker_endpoints = ["127.0.0.1:9092"]
## Number of topics to be created upon start.
## Automatically create topics for WAL.
## Set to `true` to automatically create topics for WAL.
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
auto_create_topics = true
## Number of topics.
num_topics = 64
## Topic selector type.
@@ -105,6 +113,7 @@ num_topics = 64
selector_type = "round_robin"
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
topic_name_prefix = "greptimedb_wal_topic"
## Expected number of replicas of each partition.
@@ -144,7 +153,7 @@ backoff_deadline = "5mins"
## The logging options.
[logging]
## The directory to store the log files.
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
@@ -160,6 +169,9 @@ otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
@@ -177,12 +189,13 @@ enable = false
write_interval = "30s"
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## +toml2docs:none-default
db = "information_schema"
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.

View File

@@ -8,6 +8,13 @@ enable_telemetry = true
## +toml2docs:none-default
default_timezone = "UTC"
## Initialize all regions in the background during the startup.
## By default, it provides services after all regions have been initialized.
init_regions_in_background = false
## Parallelism of initializing regions.
init_regions_parallelism = 16
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
@@ -171,7 +178,12 @@ sync_period = "10s"
## **It's only used when the provider is `kafka`**.
broker_endpoints = ["127.0.0.1:9092"]
## Number of topics to be created upon start.
## Automatically create topics for WAL.
## Set to `true` to automatically create topics for WAL.
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
auto_create_topics = true
## Number of topics.
## **It's only used when the provider is `kafka`**.
num_topics = 64
@@ -182,6 +194,7 @@ num_topics = 64
selector_type = "round_robin"
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
## **It's only used when the provider is `kafka`**.
topic_name_prefix = "greptimedb_wal_topic"
@@ -385,9 +398,23 @@ region = "us-west-2"
# Custom storage options
# [[storage.providers]]
# name = "S3"
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# [[storage.providers]]
# name = "Gcs"
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The region engine options. You can configure multiple region engines.
[[region_engine]]
@@ -431,9 +458,13 @@ sst_meta_cache_size = "128MB"
vector_cache_size = "512MB"
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## If not set, it's default to 1/8 of OS memory.
page_cache_size = "512MB"
## Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
selector_result_cache_size = "512MB"
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
@@ -444,7 +475,8 @@ experimental_write_cache_path = ""
experimental_write_cache_size = "512MB"
## TTL for write cache.
experimental_write_cache_ttl = "1h"
## +toml2docs:none-default
experimental_write_cache_ttl = "8h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
@@ -461,6 +493,10 @@ parallel_scan_channel_size = 32
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
## The options for index in Mito engine.
[region_engine.mito.index]
@@ -557,7 +593,7 @@ fork_dictionary_bytes = "1GiB"
## The logging options.
[logging]
## The directory to store the log files.
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
@@ -573,6 +609,9 @@ otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
@@ -590,12 +629,13 @@ enable = false
write_interval = "30s"
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## +toml2docs:none-default
db = "information_schema"
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.

View File

@@ -0,0 +1,51 @@
# Log benchmark configuration
This repo holds the configurations we used to benchmark GreptimeDB, Clickhouse and Elasticsearch.
Here are the versions of the databases we used in the benchmark:
| name | version |
| :------------ | :--------- |
| GreptimeDB | v0.9.2 |
| Clickhouse | 24.9.1.219 |
| Elasticsearch | 8.15.0 |
## Structured model vs Unstructured model
We divide the test into two parts, using the structured model and the unstructured model respectively. You can also see the difference in the create table clauses.
__Structured model__
The log data is pre-processed into columns by vector. For example, an insert request looks like the following:
```SQL
INSERT INTO test_table (bytes, http_version, ip, method, path, status, user, timestamp) VALUES ()
```
The goal is to test string/text support for each database. In real scenarios this means the data sources (or log data producers) have separate fields defined, or have already processed the raw input.
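For illustration, a fully populated structured insert might look like the sketch below; the values are made-up sample data (borrowed from the query examples in this repo), not rows from the actual benchmark.
```SQL
-- Hypothetical sample row for the structured model; values are illustrative only
INSERT INTO test_table (bytes, http_version, ip, method, path, status, user, timestamp)
VALUES (1024, 'HTTP/1.1', '127.0.0.1', 'OPTION', '/user/booperbot124', 401, 'CrucifiX', '2024-08-16 04:30:44.000');
```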
__Unstructured model__
The log data is inserted as a long string, and then we build a fulltext index on these strings. For example, an insert request looks like the following:
```SQL
INSERT INTO test_table (message, timestamp) VALUES ()
```
The goal is to test fuzzy search performance for each database. In real scenarios this means the log is produced by some kind of middleware and inserted directly into the database.
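A filled-in unstructured insert might look like the sketch below; the message is a made-up apache_common style line of the kind vector generates, not an actual benchmark record.
```SQL
-- Hypothetical sample row for the unstructured model; the message value is illustrative only
INSERT INTO test_table (message, timestamp)
VALUES ('127.0.0.1 - CrucifiX [16/Aug/2024:04:30:44 +0000] "OPTION /user/booperbot124 HTTP/1.1" 401 1024', '2024-08-16 04:30:44.000');
```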
## Creating tables
See [here](./create_table.sql) for GreptimeDB and Clickhouse's create table clauses.
The Elasticsearch mapping is created automatically.
## Vector Configuration
We use vector to generate random log data and send insert requests to the databases.
Please refer to the [structured config](./structured_vector.toml) and [unstructured config](./unstructured_vector.toml) for the detailed configuration.
## SQLs and payloads
Please refer to [SQL query](./query.sql) for GreptimeDB and Clickhouse, and [query payload](./query.md) for Elasticsearch.
## Steps to reproduce
0. Decide whether to run the structured model test or the unstructured model test.
1. Build the vector binary (see the vector config files for the specific branch) and the database binaries accordingly.
2. Create the tables in GreptimeDB and Clickhouse in advance.
3. Run vector to insert data.
4. When data insertion is finished, run the queries against each database. Note: you'll need to update the timerange values after data insertion; see the example below.
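For example, for GreptimeDB the boundaries can be fetched with the aggregates already listed in [SQL query](./query.sql) and then substituted into the timerange queries:
```SQL
-- Fetch the ingested time range (GreptimeDB); plug the two values into the timerange queries
select min(timestamp)::bigint from test_table;
select max(timestamp)::bigint from test_table;
```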
## Additional notes
- You can tune GreptimeDB's configuration to get better performance.
- You can set up GreptimeDB to use S3 as storage; see [here](https://docs.greptime.com/user-guide/operations/configuration/#storage-options).

View File

@@ -0,0 +1,56 @@
-- GreptimeDB create table clause
-- structured test, use vector to pre-process log data into fields
CREATE TABLE IF NOT EXISTS `test_table` (
`bytes` Int64 NULL,
`http_version` STRING NULL,
`ip` STRING NULL,
`method` STRING NULL,
`path` STRING NULL,
`status` SMALLINT UNSIGNED NULL,
`user` STRING NULL,
`timestamp` TIMESTAMP(3) NOT NULL,
PRIMARY KEY (`user`, `path`, `status`),
TIME INDEX (`timestamp`)
)
ENGINE=mito
WITH(
append_mode = 'true'
);
-- unstructured test, build fulltext index on message column
CREATE TABLE IF NOT EXISTS `test_table` (
`message` STRING NULL FULLTEXT WITH(analyzer = 'English', case_sensitive = 'false'),
`timestamp` TIMESTAMP(3) NOT NULL,
TIME INDEX (`timestamp`)
)
ENGINE=mito
WITH(
append_mode = 'true'
);
-- Clickhouse create table clause
-- structured test
CREATE TABLE IF NOT EXISTS test_table
(
bytes UInt64 NOT NULL,
http_version String NOT NULL,
ip String NOT NULL,
method String NOT NULL,
path String NOT NULL,
status UInt8 NOT NULL,
user String NOT NULL,
timestamp String NOT NULL,
)
ENGINE = MergeTree()
ORDER BY (user, path, status);
-- unstructured test
SET allow_experimental_full_text_index = true;
CREATE TABLE IF NOT EXISTS test_table
(
message String,
timestamp String,
INDEX inv_idx(message) TYPE full_text(0) GRANULARITY 1
)
ENGINE = MergeTree()
ORDER BY tuple();

View File

@@ -0,0 +1,199 @@
# Query URL and payload for Elasticsearch
## Count
URL: `http://127.0.0.1:9200/_count`
## Query by timerange
URL: `http://127.0.0.1:9200/_search`
You can use the following payload to get the full timerange first.
```JSON
{"size":0,"aggs":{"max_timestamp":{"max":{"field":"timestamp"}},"min_timestamp":{"min":{"field":"timestamp"}}}}
```
And then use this payload to query by timerange.
```JSON
{
"from": 0,
"size": 1000,
"query": {
"range": {
"timestamp": {
"gte": "2024-08-16T04:30:44.000Z",
"lte": "2024-08-16T04:51:52.000Z"
}
}
}
}
```
## Query by condition
URL: `http://127.0.0.1:9200/_search`
### Structured payload
```JSON
{
"from": 0,
"size": 10000,
"query": {
"bool": {
"must": [
{
"term": {
"user.keyword": "CrucifiX"
}
},
{
"term": {
"method.keyword": "OPTION"
}
},
{
"term": {
"path.keyword": "/user/booperbot124"
}
},
{
"term": {
"http_version.keyword": "HTTP/1.1"
}
},
{
"term": {
"status": "401"
}
}
]
}
}
}
```
### Unstructured payload
```JSON
{
"from": 0,
"size": 10000,
"query": {
"bool": {
"must": [
{
"match_phrase": {
"message": "CrucifiX"
}
},
{
"match_phrase": {
"message": "OPTION"
}
},
{
"match_phrase": {
"message": "/user/booperbot124"
}
},
{
"match_phrase": {
"message": "HTTP/1.1"
}
},
{
"match_phrase": {
"message": "401"
}
}
]
}
}
}
```
## Query by condition and timerange
URL: `http://127.0.0.1:9200/_search`
### Structured payload
```JSON
{
"size": 10000,
"query": {
"bool": {
"must": [
{
"term": {
"user.keyword": "CrucifiX"
}
},
{
"term": {
"method.keyword": "OPTION"
}
},
{
"term": {
"path.keyword": "/user/booperbot124"
}
},
{
"term": {
"http_version.keyword": "HTTP/1.1"
}
},
{
"term": {
"status": "401"
}
},
{
"range": {
"timestamp": {
"gte": "2024-08-19T07:03:37.383Z",
"lte": "2024-08-19T07:24:58.883Z"
}
}
}
]
}
}
}
```
### Unstructured payload
```JSON
{
"size": 10000,
"query": {
"bool": {
"must": [
{
"match_phrase": {
"message": "CrucifiX"
}
},
{
"match_phrase": {
"message": "OPTION"
}
},
{
"match_phrase": {
"message": "/user/booperbot124"
}
},
{
"match_phrase": {
"message": "HTTP/1.1"
}
},
{
"match_phrase": {
"message": "401"
}
},
{
"range": {
"timestamp": {
"gte": "2024-08-19T05:16:17.099Z",
"lte": "2024-08-19T05:46:02.722Z"
}
}
}
]
}
}
}
```

View File

@@ -0,0 +1,50 @@
-- Structured query for GreptimeDB and Clickhouse
-- query count
select count(*) from test_table;
-- query by timerange. Note: place the timestamp range in the where clause
-- GreptimeDB
-- you can use `select max(timestamp)::bigint from test_table;` and `select min(timestamp)::bigint from test_table;`
-- to get the full timestamp range
select * from test_table where timestamp between 1723710843619 and 1723711367588;
-- Clickhouse
-- you can use `select max(timestamp) from test_table;` and `select min(timestamp) from test_table;`
-- to get the full timestamp range
select * from test_table where timestamp between '2024-08-16T03:58:46Z' and '2024-08-16T04:03:50Z';
-- query by condition
SELECT * FROM test_table WHERE user = 'CrucifiX' and method = 'OPTION' and path = '/user/booperbot124' and http_version = 'HTTP/1.1' and status = 401;
-- query by condition and timerange
-- GreptimeDB
SELECT * FROM test_table WHERE user = "CrucifiX" and method = "OPTION" and path = "/user/booperbot124" and http_version = "HTTP/1.1" and status = 401
and timestamp between 1723774396760 and 1723774788760;
-- Clickhouse
SELECT * FROM test_table WHERE user = 'CrucifiX' and method = 'OPTION' and path = '/user/booperbot124' and http_version = 'HTTP/1.1' and status = 401
and timestamp between '2024-08-16T03:58:46Z' and '2024-08-16T04:03:50Z';
-- Unstructured query for GreptimeDB and Clickhouse
-- query by condition
-- GreptimeDB
SELECT * FROM test_table WHERE MATCHES(message, "+CrucifiX +OPTION +/user/booperbot124 +HTTP/1.1 +401");
-- Clickhouse
SELECT * FROM test_table WHERE (message LIKE '%CrucifiX%')
AND (message LIKE '%OPTION%')
AND (message LIKE '%/user/booperbot124%')
AND (message LIKE '%HTTP/1.1%')
AND (message LIKE '%401%');
-- query by condition and timerange
-- GreptimeDB
SELECT * FROM test_table WHERE MATCHES(message, "+CrucifiX +OPTION +/user/booperbot124 +HTTP/1.1 +401")
and timestamp between 1723710843619 and 1723711367588;
-- Clickhouse
SELECT * FROM test_table WHERE (message LIKE '%CrucifiX%')
AND (message LIKE '%OPTION%')
AND (message LIKE '%/user/booperbot124%')
AND (message LIKE '%HTTP/1.1%')
AND (message LIKE '%401%')
AND timestamp between '2024-08-15T10:25:26.524000000Z' AND '2024-08-15T10:31:31.746000000Z';

View File

@@ -0,0 +1,57 @@
# Please note we use patched branch to build vector
# https://github.com/shuiyisong/vector/tree/chore/greptime_log_ingester_logitem
[sources.demo_logs]
type = "demo_logs"
format = "apache_common"
# interval value = 1 / rps
# say you want to insert at 20k/s, that is 1 / 20000 = 0.00005
# set to 0 to run as fast as possible
interval = 0
# total rows to insert
count = 100000000
lines = [ "line1" ]
[transforms.parse_logs]
type = "remap"
inputs = ["demo_logs"]
source = '''
. = parse_regex!(.message, r'^(?P<ip>\S+) - (?P<user>\S+) \[(?P<timestamp>[^\]]+)\] "(?P<method>\S+) (?P<path>\S+) (?P<http_version>\S+)" (?P<status>\d+) (?P<bytes>\d+)$')
# Convert timestamp to a standard format
.timestamp = parse_timestamp!(.timestamp, format: "%d/%b/%Y:%H:%M:%S %z")
# Convert status and bytes to integers
.status = to_int!(.status)
.bytes = to_int!(.bytes)
'''
[sinks.sink_greptime_logs]
type = "greptimedb_logs"
# The table to insert into
table = "test_table"
pipeline_name = "demo_pipeline"
compression = "none"
inputs = [ "parse_logs" ]
endpoint = "http://127.0.0.1:4000"
# Batch size for each insertion
batch.max_events = 4000
[sinks.clickhouse]
type = "clickhouse"
inputs = [ "parse_logs" ]
database = "default"
endpoint = "http://127.0.0.1:8123"
format = "json_each_row"
# The table to insert into
table = "test_table"
[sinks.sink_elasticsearch]
type = "elasticsearch"
inputs = [ "parse_logs" ]
api_version = "auto"
compression = "none"
doc_type = "_doc"
endpoints = [ "http://127.0.0.1:9200" ]
id_key = "id"
mode = "bulk"

View File

@@ -0,0 +1,43 @@
# Please note we use patched branch to build vector
# https://github.com/shuiyisong/vector/tree/chore/greptime_log_ingester_ft
[sources.demo_logs]
type = "demo_logs"
format = "apache_common"
# interval value = 1 / rps
# say you want to insert at 20k/s, that is 1 / 20000 = 0.00005
# set to 0 to run as fast as possible
interval = 0
# total rows to insert
count = 100000000
lines = [ "line1" ]
[sinks.sink_greptime_logs]
type = "greptimedb_logs"
# The table to insert into
table = "test_table"
pipeline_name = "demo_pipeline"
compression = "none"
inputs = [ "demo_logs" ]
endpoint = "http://127.0.0.1:4000"
# Batch size for each insertion
batch.max_events = 500
[sinks.clickhouse]
type = "clickhouse"
inputs = [ "demo_logs" ]
database = "default"
endpoint = "http://127.0.0.1:8123"
format = "json_each_row"
# The table to insert into
table = "test_table"
[sinks.sink_elasticsearch]
type = "elasticsearch"
inputs = [ "demo_logs" ]
api_version = "auto"
compression = "none"
doc_type = "_doc"
endpoints = [ "http://127.0.0.1:9200" ]
id_key = "id"
mode = "bulk"
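The two Vector configurations above differ only in whether the demo logs pass through the remap transform before reaching the sinks; both fan the same stream out to the GreptimeDB, ClickHouse, and Elasticsearch sinks until `count` rows have been emitted. As a minimal sketch (the file names below are assumptions, not part of the benchmark scripts), either run can be started with the patched Vector binary:
# structured pipeline: demo_logs -> parse_logs -> sinks
vector --config vector-structured.toml
# unstructured pipeline: demo_logs -> sinks
vector --config vector-unstructured.toml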

View File

@@ -157,26 +157,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -326,26 +306,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -497,26 +457,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -668,26 +608,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -852,26 +772,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -1023,26 +923,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -1194,26 +1074,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -1365,26 +1225,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -1536,26 +1376,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -1707,26 +1527,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -1878,26 +1678,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -2062,26 +1842,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -2233,26 +1993,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -2417,26 +2157,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -2569,26 +2289,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -2751,26 +2451,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -3091,28 +2771,7 @@
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
"overrides": []
},
"gridPos": {
"h": 8,
@@ -3242,26 +2901,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -3429,26 +3068,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -3598,26 +3217,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -3740,28 +3339,7 @@
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
"overrides": []
},
"gridPos": {
"h": 8,
@@ -4101,26 +3679,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -4270,26 +3828,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -4439,26 +3977,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -4608,26 +4126,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -4777,26 +4275,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -4946,26 +4424,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -5115,26 +4573,6 @@
}
}
]
},
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
},
@@ -5241,4 +4679,4 @@
"uid": "ea35efe5-918e-44fa-9743-e9aa1a340a3f",
"version": 11,
"weekStart": ""
}
}

View File

@@ -1,2 +1,3 @@
[toolchain]
channel = "nightly-2024-04-20"
channel = "nightly-2024-06-06"

View File

@@ -0,0 +1,42 @@
#!/usr/bin/env bash
set -e
RUST_TOOLCHAIN_VERSION_FILE="rust-toolchain.toml"
DEV_BUILDER_UBUNTU_REGISTRY="docker.io"
DEV_BUILDER_UBUNTU_NAMESPACE="greptime"
DEV_BUILDER_UBUNTU_NAME="dev-builder-ubuntu"
function check_rust_toolchain_version() {
DEV_BUILDER_IMAGE_TAG=$(grep "DEV_BUILDER_IMAGE_TAG ?= " Makefile | cut -d= -f2 | sed 's/^[ \t]*//')
if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
echo "Error: No DEV_BUILDER_IMAGE_TAG found in Makefile"
exit 1
fi
DEV_BUILDER_UBUNTU_IMAGE="$DEV_BUILDER_UBUNTU_REGISTRY/$DEV_BUILDER_UBUNTU_NAMESPACE/$DEV_BUILDER_UBUNTU_NAME:$DEV_BUILDER_IMAGE_TAG"
CURRENT_VERSION=$(grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}' "$RUST_TOOLCHAIN_VERSION_FILE")
if [ -z "$CURRENT_VERSION" ]; then
echo "Error: No rust toolchain version found in $RUST_TOOLCHAIN_VERSION_FILE"
exit 1
fi
RUST_TOOLCHAIN_VERSION_IN_BUILDER=$(docker run "$DEV_BUILDER_UBUNTU_IMAGE" rustc --version | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
if [ -z "$RUST_TOOLCHAIN_VERSION_IN_BUILDER" ]; then
echo "Error: No rustc version found in $DEV_BUILDER_UBUNTU_IMAGE"
exit 1
fi
# Compare the version and the difference should be less than 1 day.
current_rust_toolchain_seconds=$(date -d "$CURRENT_VERSION" +%s)
rust_toolchain_in_dev_builder_ubuntu_seconds=$(date -d "$RUST_TOOLCHAIN_VERSION_IN_BUILDER" +%s)
date_diff=$(( (current_rust_toolchain_seconds - rust_toolchain_in_dev_builder_ubuntu_seconds) / 86400 ))
if [ $date_diff -gt 1 ]; then
echo "Error: The rust toolchain '$RUST_TOOLCHAIN_VERSION_IN_BUILDER' in builder '$DEV_BUILDER_UBUNTU_IMAGE' maybe outdated, please update it to '$CURRENT_VERSION'"
exit 1
fi
}
check_rust_toolchain_version
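A minimal sketch of running this check locally, assuming the script is saved under a hypothetical path such as scripts/check-dev-builder-toolchain.sh; it must be run from the repository root so the relative Makefile and rust-toolchain.toml lookups succeed, it needs Docker to pull and run the dev-builder image, and the `date -d` calls require GNU date:
# hypothetical script path (assumption); run from the repository root
bash scripts/check-dev-builder-toolchain.sh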

scripts/check-snafu.py (new file, 69 lines)
View File

@@ -0,0 +1,69 @@
# Copyright 2023 Greptime Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
def find_rust_files(directory):
error_files = []
other_rust_files = []
for root, _, files in os.walk(directory):
for file in files:
if file == "error.rs":
error_files.append(os.path.join(root, file))
elif file.endswith(".rs"):
other_rust_files.append(os.path.join(root, file))
return error_files, other_rust_files
def extract_branch_names(file_content):
pattern = re.compile(r"#\[snafu\(display\([^\)]*\)\)\]\s*(\w+)\s*\{")
return pattern.findall(file_content)
def check_snafu_in_files(branch_name, rust_files):
branch_name_snafu = f"{branch_name}Snafu"
for rust_file in rust_files:
with open(rust_file, "r") as file:
content = file.read()
if branch_name_snafu in content:
return True
return False
def main():
error_files, other_rust_files = find_rust_files(".")
branch_names = []
for error_file in error_files:
with open(error_file, "r") as file:
content = file.read()
branch_names.extend(extract_branch_names(content))
unused_snafu = [
branch_name
for branch_name in branch_names
if not check_snafu_in_files(branch_name, other_rust_files)
]
for name in unused_snafu:
print(name)
if unused_snafu:
raise SystemExit(1)
if __name__ == "__main__":
main()
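A short usage sketch: the script takes no arguments and is intended to be run from the repository root, where it walks every crate for `error.rs` files, collects the variant names behind `#[snafu(display(...))]`, and exits with status 1 after printing any `<Variant>Snafu` selector that is never referenced elsewhere:
# run from the repository root; prints unused snafu variants and fails if any exist
python3 scripts/check-snafu.py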

View File

@@ -13,9 +13,11 @@
// limitations under the License.
use common_base::secrets::ExposeSecret;
use common_error::ext::BoxedError;
use snafu::{OptionExt, ResultExt};
use crate::error::{
AccessDeniedSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
AccessDeniedSnafu, AuthBackendSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
UserPasswordMismatchSnafu,
};
use crate::user_info::DefaultUserInfo;
@@ -49,6 +51,19 @@ impl MockUserProvider {
info.schema.clone_into(&mut self.schema);
info.username.clone_into(&mut self.username);
}
// this is a deliberate function to ref AuthBackendSnafu
// so that it won't get deleted in the future
pub fn ref_auth_backend_snafu(&self) -> Result<()> {
let none_option = None;
none_option
.context(UserNotFoundSnafu {
username: "no_user".to_string(),
})
.map_err(BoxedError::new)
.context(AuthBackendSnafu)
}
}
#[async_trait::async_trait]

View File

@@ -18,6 +18,7 @@ use std::sync::Arc;
use api::v1::greptime_request::Request;
use auth::error::Error::InternalState;
use auth::error::InternalStateSnafu;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq, PermissionResp, UserInfoRef};
use sql::statements::show::{ShowDatabases, ShowKind};
use sql::statements::statement::Statement;
@@ -33,9 +34,10 @@ impl PermissionChecker for DummyPermissionChecker {
match req {
PermissionReq::GrpcRequest(_) => Ok(PermissionResp::Allow),
PermissionReq::SqlStatement(_) => Ok(PermissionResp::Reject),
_ => Err(InternalState {
_ => InternalStateSnafu {
msg: "testing".to_string(),
}),
}
.fail(),
}
}
}

View File

@@ -97,13 +97,6 @@ pub enum Error {
source: table::error::Error,
},
#[snafu(display("System catalog is not valid: {}", msg))]
SystemCatalog {
msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Cannot find catalog by name: {}", catalog_name))]
CatalogNotFound {
catalog_name: String,
@@ -186,13 +179,6 @@ pub enum Error {
source: common_query::error::Error,
},
#[snafu(display("Failed to perform metasrv operation"))]
Metasrv {
#[snafu(implicit)]
location: Location,
source: meta_client::error::Error,
},
#[snafu(display("Invalid table info in catalog"))]
InvalidTableInfoInCatalog {
#[snafu(implicit)]
@@ -288,8 +274,6 @@ impl ErrorExt for Error {
Error::FlowInfoNotFound { .. } => StatusCode::FlowNotFound,
Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,
Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
Error::CreateRecordBatch { source, .. } => source.status_code(),
@@ -303,7 +287,6 @@ impl ErrorExt for Error {
Error::CreateTable { source, .. } => source.status_code(),
Error::Metasrv { source, .. } => source.status_code(),
Error::DecodePlan { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
@@ -338,27 +321,6 @@ mod tests {
use super::*;
#[test]
pub fn test_error_status_code() {
assert_eq!(
StatusCode::TableAlreadyExists,
Error::TableExists {
table: "some_table".to_string(),
location: Location::generate(),
}
.status_code()
);
assert_eq!(
StatusCode::StorageUnavailable,
Error::SystemCatalog {
msg: String::default(),
location: Location::generate(),
}
.status_code()
);
}
#[test]
pub fn test_errors_to_datafusion_error() {
let e: DataFusionError = Error::TableExists {

View File

@@ -20,8 +20,8 @@ use std::time::Duration;
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::KvCacheInvalidator;
use common_meta::error::Error::{CacheNotGet, GetKvCache};
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, Result};
use common_meta::error::Error::CacheNotGet;
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
use common_meta::rpc::store::{
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
@@ -282,8 +282,11 @@ impl KvBackend for CachedMetaKvBackend {
_ => Err(e),
},
}
.map_err(|e| GetKvCache {
err_msg: e.to_string(),
.map_err(|e| {
GetKvCacheSnafu {
err_msg: e.to_string(),
}
.build()
});
// "cache.invalidate_key" and "cache.try_get_with_by_ref" are not mutually exclusive. So we need

View File

@@ -313,7 +313,7 @@ struct SystemCatalog {
catalog_cache: Cache<String, Arc<InformationSchemaProvider>>,
pg_catalog_cache: Cache<String, Arc<PGCatalogProvider>>,
// system_schema_provier for default catalog
// system_schema_provider for default catalog
information_schema_provider: Arc<InformationSchemaProvider>,
pg_catalog_provider: Arc<PGCatalogProvider>,
backend: KvBackendRef,

View File

@@ -37,7 +37,8 @@ use tonic::metadata::AsciiMetadataKey;
use tonic::transport::Channel;
use crate::error::{
ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, InvalidAsciiSnafu, ServerSnafu,
ConvertFlightDataSnafu, Error, FlightGetSnafu, IllegalFlightMessagesSnafu, InvalidAsciiSnafu,
ServerSnafu,
};
use crate::{from_grpc_response, Client, Result};
@@ -91,7 +92,7 @@ impl Database {
///
/// - the name of database when using GreptimeDB standalone or cluster
/// - the name provided by GreptimeCloud or other multi-tenant GreptimeDB
/// environment
/// environment
pub fn new_with_dbname(dbname: impl Into<String>, client: Client) -> Self {
Self {
catalog: String::default(),
@@ -225,16 +226,18 @@ impl Database {
let mut client = self.client.make_flight_client()?;
let response = client.mut_inner().do_get(request).await.map_err(|e| {
let response = client.mut_inner().do_get(request).await.or_else(|e| {
let tonic_code = e.code();
let e: Error = e.into();
let code = e.status_code();
let msg = e.to_string();
let error = Error::FlightGet {
tonic_code,
addr: client.addr().to_string(),
source: BoxedError::new(ServerSnafu { code, msg }.build()),
};
let error =
Err(BoxedError::new(ServerSnafu { code, msg }.build())).with_context(|_| {
FlightGetSnafu {
addr: client.addr().to_string(),
tonic_code,
}
});
error!(
"Failed to do Flight get, addr: {}, code: {}, source: {:?}",
client.addr(),

View File

@@ -39,13 +39,6 @@ pub enum Error {
source: BoxedError,
},
#[snafu(display("Failure occurs during handling request"))]
HandleRequest {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to convert FlightData"))]
ConvertFlightData {
#[snafu(implicit)]
@@ -116,13 +109,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to send request with streaming: {}", err_msg))]
ClientStreaming {
err_msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to parse ascii string: {}", value))]
InvalidAscii {
value: String,
@@ -138,12 +124,10 @@ impl ErrorExt for Error {
match self {
Error::IllegalFlightMessages { .. }
| Error::MissingField { .. }
| Error::IllegalDatabaseResponse { .. }
| Error::ClientStreaming { .. } => StatusCode::Internal,
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,
Error::Server { code, .. } => *code,
Error::FlightGet { source, .. }
| Error::HandleRequest { source, .. }
| Error::RegionServer { source, .. }
| Error::FlowServer { source, .. } => source.status_code(),
Error::CreateChannel { source, .. }

View File

@@ -16,9 +16,9 @@ use api::v1::flow::{FlowRequest, FlowResponse};
use api::v1::region::InsertRequests;
use common_error::ext::BoxedError;
use common_meta::node_manager::Flownode;
use snafu::{location, ResultExt};
use snafu::ResultExt;
use crate::error::Result;
use crate::error::{FlowServerSnafu, Result};
use crate::Client;
#[derive(Debug)]
@@ -57,15 +57,10 @@ impl FlowRequester {
let response = client
.handle_create_remove(request)
.await
.map_err(|e| {
.or_else(|e| {
let code = e.code();
let err: crate::error::Error = e.into();
crate::error::Error::FlowServer {
addr,
code,
source: BoxedError::new(err),
location: location!(),
}
Err(BoxedError::new(err)).context(FlowServerSnafu { addr, code })
})?
.into_inner();
Ok(response)
@@ -88,15 +83,10 @@ impl FlowRequester {
let response = client
.handle_mirror_request(requests)
.await
.map_err(|e| {
.or_else(|e| {
let code = e.code();
let err: crate::error::Error = e.into();
crate::error::Error::FlowServer {
addr,
code,
source: BoxedError::new(err),
location: location!(),
}
Err(BoxedError::new(err)).context(FlowServerSnafu { addr, code })
})?
.into_inner();
Ok(response)

View File

@@ -38,8 +38,8 @@ use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use tokio_stream::StreamExt;
use crate::error::{
self, ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
MissingFieldSnafu, Result, ServerSnafu,
self, ConvertFlightDataSnafu, FlightGetSnafu, IllegalDatabaseResponseSnafu,
IllegalFlightMessagesSnafu, MissingFieldSnafu, Result, ServerSnafu,
};
use crate::{metrics, Client, Error};
@@ -103,11 +103,14 @@ impl RegionRequester {
let e: error::Error = e.into();
let code = e.status_code();
let msg = e.to_string();
let error = Error::FlightGet {
tonic_code,
addr: flight_client.addr().to_string(),
source: BoxedError::new(ServerSnafu { code, msg }.build()),
};
let error = ServerSnafu { code, msg }
.fail::<()>()
.map_err(BoxedError::new)
.with_context(|_| FlightGetSnafu {
tonic_code,
addr: flight_client.addr().to_string(),
})
.unwrap_err();
error!(
e; "Failed to do Flight get, addr: {}, code: {}",
flight_client.addr(),

View File

@@ -51,7 +51,7 @@ file-engine.workspace = true
flow.workspace = true
frontend = { workspace = true, default-features = false }
futures.workspace = true
human-panic = "1.2.2"
human-panic = "2.0"
lazy_static.workspace = true
meta-client.workspace = true
meta-srv.workspace = true
@@ -80,7 +80,7 @@ tonic.workspace = true
tracing-appender = "0.2"
[target.'cfg(not(windows))'.dependencies]
tikv-jemallocator = "0.5"
tikv-jemallocator = "0.6"
[dev-dependencies]
client = { workspace = true, features = ["testing"] }

View File

@@ -139,13 +139,10 @@ async fn start(cli: Command) -> Result<()> {
}
fn setup_human_panic() {
let metadata = human_panic::Metadata {
version: env!("CARGO_PKG_VERSION").into(),
name: "GreptimeDB".into(),
authors: Default::default(),
homepage: "https://github.com/GreptimeTeam/greptimedb/discussions".into(),
};
human_panic::setup_panic!(metadata);
human_panic::setup_panic!(
human_panic::Metadata::new("GreptimeDB", env!("CARGO_PKG_VERSION"))
.homepage("https://github.com/GreptimeTeam/greptimedb/discussions")
);
common_telemetry::set_panic_hook();
}

View File

@@ -21,6 +21,8 @@ mod export;
mod helper;
// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
mod database;
mod import;
#[allow(unused)]
mod repl;
@@ -32,6 +34,7 @@ pub use repl::Repl;
use tracing_appender::non_blocking::WorkerGuard;
use self::export::ExportCommand;
use crate::cli::import::ImportCommand;
use crate::error::Result;
use crate::options::GlobalOptions;
use crate::App;
@@ -114,6 +117,7 @@ enum SubCommand {
// Attach(AttachCommand),
Bench(BenchTableMetadataCommand),
Export(ExportCommand),
Import(ImportCommand),
}
impl SubCommand {
@@ -122,6 +126,7 @@ impl SubCommand {
// SubCommand::Attach(cmd) => cmd.build().await,
SubCommand::Bench(cmd) => cmd.build(guard).await,
SubCommand::Export(cmd) => cmd.build(guard).await,
SubCommand::Import(cmd) => cmd.build(guard).await,
}
}
}

src/cmd/src/cli/database.rs (new file, 119 lines)
View File

@@ -0,0 +1,119 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use base64::engine::general_purpose;
use base64::Engine;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use serde_json::Value;
use servers::http::greptime_result_v1::GreptimedbV1Response;
use servers::http::GreptimeQueryOutput;
use snafu::ResultExt;
use crate::error::{HttpQuerySqlSnafu, Result, SerdeJsonSnafu};
pub(crate) struct DatabaseClient {
addr: String,
catalog: String,
auth_header: Option<String>,
}
impl DatabaseClient {
pub fn new(addr: String, catalog: String, auth_basic: Option<String>) -> Self {
let auth_header = if let Some(basic) = auth_basic {
let encoded = general_purpose::STANDARD.encode(basic);
Some(format!("basic {}", encoded))
} else {
None
};
Self {
addr,
catalog,
auth_header,
}
}
pub async fn sql_in_public(&self, sql: &str) -> Result<Option<Vec<Vec<Value>>>> {
self.sql(sql, DEFAULT_SCHEMA_NAME).await
}
/// Execute sql query.
pub async fn sql(&self, sql: &str, schema: &str) -> Result<Option<Vec<Vec<Value>>>> {
let url = format!("http://{}/v1/sql", self.addr);
let params = [
("db", format!("{}-{}", self.catalog, schema)),
("sql", sql.to_string()),
];
let mut request = reqwest::Client::new()
.post(&url)
.form(&params)
.header("Content-Type", "application/x-www-form-urlencoded");
if let Some(ref auth) = self.auth_header {
request = request.header("Authorization", auth);
}
let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
reason: format!("bad url: {}", url),
})?;
let response = response
.error_for_status()
.with_context(|_| HttpQuerySqlSnafu {
reason: format!("query failed: {}", sql),
})?;
let text = response.text().await.with_context(|_| HttpQuerySqlSnafu {
reason: "cannot get response text".to_string(),
})?;
let body = serde_json::from_str::<GreptimedbV1Response>(&text).context(SerdeJsonSnafu)?;
Ok(body.output().first().and_then(|output| match output {
GreptimeQueryOutput::Records(records) => Some(records.rows().clone()),
GreptimeQueryOutput::AffectedRows(_) => None,
}))
}
}
/// Split at `-`.
pub(crate) fn split_database(database: &str) -> Result<(String, Option<String>)> {
let (catalog, schema) = match database.split_once('-') {
Some((catalog, schema)) => (catalog, schema),
None => (DEFAULT_CATALOG_NAME, database),
};
if schema == "*" {
Ok((catalog.to_string(), None))
} else {
Ok((catalog.to_string(), Some(schema.to_string())))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_split_database() {
let result = split_database("catalog-schema").unwrap();
assert_eq!(result, ("catalog".to_string(), Some("schema".to_string())));
let result = split_database("schema").unwrap();
assert_eq!(result, ("greptime".to_string(), Some("schema".to_string())));
let result = split_database("catalog-*").unwrap();
assert_eq!(result, ("catalog".to_string(), None));
let result = split_database("*").unwrap();
assert_eq!(result, ("greptime".to_string(), None));
}
}
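The client above is a thin wrapper over GreptimeDB's HTTP SQL endpoint. A rough curl equivalent of `sql_in_public` is sketched below; the address is the local default used elsewhere in this change set, and `greptime-public` assumes the default catalog/schema pair:
# same request DatabaseClient::sql builds: form-encoded db and sql posted to /v1/sql
curl -s -X POST "http://127.0.0.1:4000/v1/sql" \
  -H "Content-Type: application/x-www-form-urlencoded" \
  --data-urlencode "db=greptime-public" \
  --data-urlencode "sql=SHOW DATABASES"
# when basic auth is configured, also pass: -H "Authorization: basic $(echo -n 'user:pass' | base64)"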

View File

@@ -13,30 +13,23 @@
// limitations under the License.
use std::collections::HashSet;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use async_trait::async_trait;
use base64::engine::general_purpose;
use base64::Engine;
use clap::{Parser, ValueEnum};
use client::DEFAULT_SCHEMA_NAME;
use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_telemetry::{debug, error, info};
use serde_json::Value;
use servers::http::greptime_result_v1::GreptimedbV1Response;
use servers::http::GreptimeQueryOutput;
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};
use tokio::fs::File;
use tokio::io::{AsyncWriteExt, BufWriter};
use tokio::sync::Semaphore;
use tokio::time::Instant;
use tracing_appender::non_blocking::WorkerGuard;
use crate::cli::{Instance, Tool};
use crate::error::{
EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, Result, SerdeJsonSnafu,
};
use crate::cli::database::DatabaseClient;
use crate::cli::{database, Instance, Tool};
use crate::error::{EmptyResultSnafu, Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
type TableReference = (String, String, String);
@@ -94,26 +87,21 @@ pub struct ExportCommand {
impl ExportCommand {
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
let (catalog, schema) = split_database(&self.database)?;
let (catalog, schema) = database::split_database(&self.database)?;
let auth_header = if let Some(basic) = &self.auth_basic {
let encoded = general_purpose::STANDARD.encode(basic);
Some(format!("basic {}", encoded))
} else {
None
};
let database_client =
DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
Ok(Instance::new(
Box::new(Export {
addr: self.addr.clone(),
catalog,
schema,
database_client,
output_dir: self.output_dir.clone(),
parallelism: self.export_jobs,
target: self.target.clone(),
start_time: self.start_time.clone(),
end_time: self.end_time.clone(),
auth_header,
}),
guard,
))
@@ -121,78 +109,59 @@ impl ExportCommand {
}
pub struct Export {
addr: String,
catalog: String,
schema: Option<String>,
database_client: DatabaseClient,
output_dir: String,
parallelism: usize,
target: ExportTarget,
start_time: Option<String>,
end_time: Option<String>,
auth_header: Option<String>,
}
impl Export {
/// Execute one single sql query.
async fn sql(&self, sql: &str) -> Result<Option<Vec<Vec<Value>>>> {
let url = format!(
"http://{}/v1/sql?db={}-{}&sql={}",
self.addr,
self.catalog,
self.schema.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME),
sql
);
fn catalog_path(&self) -> PathBuf {
PathBuf::from(&self.output_dir).join(&self.catalog)
}
let mut request = reqwest::Client::new()
.get(&url)
.header("Content-Type", "application/x-www-form-urlencoded");
if let Some(ref auth) = self.auth_header {
request = request.header("Authorization", auth);
}
async fn get_db_names(&self) -> Result<Vec<String>> {
let db_names = self.all_db_names().await?;
let Some(schema) = &self.schema else {
return Ok(db_names);
};
let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
reason: format!("bad url: {}", url),
})?;
let response = response
.error_for_status()
.with_context(|_| HttpQuerySqlSnafu {
reason: format!("query failed: {}", sql),
})?;
let text = response.text().await.with_context(|_| HttpQuerySqlSnafu {
reason: "cannot get response text".to_string(),
})?;
let body = serde_json::from_str::<GreptimedbV1Response>(&text).context(SerdeJsonSnafu)?;
Ok(body.output().first().and_then(|output| match output {
GreptimeQueryOutput::Records(records) => Some(records.rows().clone()),
GreptimeQueryOutput::AffectedRows(_) => None,
}))
// Check if the schema exists
db_names
.into_iter()
.find(|db_name| db_name.to_lowercase() == schema.to_lowercase())
.map(|name| vec![name])
.context(SchemaNotFoundSnafu {
catalog: &self.catalog,
schema,
})
}
/// Iterate over all db names.
///
/// Newbie: `db_name` is catalog + schema.
async fn iter_db_names(&self) -> Result<Vec<(String, String)>> {
if let Some(schema) = &self.schema {
Ok(vec![(self.catalog.clone(), schema.clone())])
} else {
let result = self.sql("SHOW DATABASES").await?;
let Some(records) = result else {
EmptyResultSnafu.fail()?
async fn all_db_names(&self) -> Result<Vec<String>> {
let records = self
.database_client
.sql_in_public("SHOW DATABASES")
.await?
.context(EmptyResultSnafu)?;
let mut result = Vec::with_capacity(records.len());
for value in records {
let Value::String(schema) = &value[0] else {
unreachable!()
};
let mut result = Vec::with_capacity(records.len());
for value in records {
let Value::String(schema) = &value[0] else {
unreachable!()
};
if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
continue;
}
result.push((self.catalog.clone(), schema.clone()));
if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
continue;
}
Ok(result)
if schema == common_catalog::consts::PG_CATALOG_NAME {
continue;
}
result.push(schema.clone());
}
Ok(result)
}
/// Return a list of [`TableReference`] to be exported.
@@ -201,7 +170,11 @@ impl Export {
&self,
catalog: &str,
schema: &str,
) -> Result<(Vec<TableReference>, Vec<TableReference>)> {
) -> Result<(
Vec<TableReference>,
Vec<TableReference>,
Vec<TableReference>,
)> {
// Puts all metric table first
let sql = format!(
"SELECT table_catalog, table_schema, table_name \
@@ -210,15 +183,16 @@ impl Export {
and table_catalog = \'{catalog}\' \
and table_schema = \'{schema}\'"
);
let result = self.sql(&sql).await?;
let Some(records) = result else {
EmptyResultSnafu.fail()?
};
let records = self
.database_client
.sql_in_public(&sql)
.await?
.context(EmptyResultSnafu)?;
let mut metric_physical_tables = HashSet::with_capacity(records.len());
for value in records {
let mut t = Vec::with_capacity(3);
for v in &value {
let serde_json::Value::String(value) = v else {
let Value::String(value) = v else {
unreachable!()
};
t.push(value);
@@ -226,100 +200,142 @@ impl Export {
metric_physical_tables.insert((t[0].clone(), t[1].clone(), t[2].clone()));
}
// TODO: SQL injection hurts
let sql = format!(
"SELECT table_catalog, table_schema, table_name \
"SELECT table_catalog, table_schema, table_name, table_type \
FROM information_schema.tables \
WHERE table_type = \'BASE TABLE\' \
WHERE (table_type = \'BASE TABLE\' OR table_type = \'VIEW\') \
and table_catalog = \'{catalog}\' \
and table_schema = \'{schema}\'",
);
let result = self.sql(&sql).await?;
let Some(records) = result else {
EmptyResultSnafu.fail()?
};
let records = self
.database_client
.sql_in_public(&sql)
.await?
.context(EmptyResultSnafu)?;
debug!("Fetched table list: {:?}", records);
debug!("Fetched table/view list: {:?}", records);
if records.is_empty() {
return Ok((vec![], vec![]));
return Ok((vec![], vec![], vec![]));
}
let mut remaining_tables = Vec::with_capacity(records.len());
let mut views = Vec::new();
for value in records {
let mut t = Vec::with_capacity(3);
let mut t = Vec::with_capacity(4);
for v in &value {
let serde_json::Value::String(value) = v else {
let Value::String(value) = v else {
unreachable!()
};
t.push(value);
}
let table = (t[0].clone(), t[1].clone(), t[2].clone());
let table_type = t[3].as_str();
// Ignores the physical table
if !metric_physical_tables.contains(&table) {
remaining_tables.push(table);
if table_type == "VIEW" {
views.push(table);
} else {
remaining_tables.push(table);
}
}
}
Ok((
metric_physical_tables.into_iter().collect(),
remaining_tables,
views,
))
}
async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
let sql = format!(
r#"SHOW CREATE TABLE "{}"."{}"."{}""#,
catalog, schema, table
);
let result = self.sql(&sql).await?;
let Some(records) = result else {
EmptyResultSnafu.fail()?
async fn show_create(
&self,
show_type: &str,
catalog: &str,
schema: &str,
table: Option<&str>,
) -> Result<String> {
let sql = match table {
Some(table) => format!(
r#"SHOW CREATE {} "{}"."{}"."{}""#,
show_type, catalog, schema, table
),
None => format!(r#"SHOW CREATE {} "{}"."{}""#, show_type, catalog, schema),
};
let Value::String(create_table) = &records[0][1] else {
let records = self
.database_client
.sql_in_public(&sql)
.await?
.context(EmptyResultSnafu)?;
let Value::String(create) = &records[0][1] else {
unreachable!()
};
Ok(format!("{};\n", create_table))
Ok(format!("{};\n", create))
}
async fn export_create_database(&self) -> Result<()> {
let timer = Instant::now();
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
for schema in db_names {
let db_dir = self.catalog_path().join(format!("{schema}/"));
tokio::fs::create_dir_all(&db_dir)
.await
.context(FileIoSnafu)?;
let file = db_dir.join("create_database.sql");
let mut file = File::create(file).await.context(FileIoSnafu)?;
let create_database = self
.show_create("DATABASE", &self.catalog, &schema, None)
.await?;
file.write_all(create_database.as_bytes())
.await
.context(FileIoSnafu)?;
}
let elapsed = timer.elapsed();
info!("Success {db_count} jobs, cost: {elapsed:?}");
Ok(())
}
async fn export_create_table(&self) -> Result<()> {
let timer = Instant::now();
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.iter_db_names().await?;
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_names.len());
for (catalog, schema) in db_names {
for schema in db_names {
let semaphore_moved = semaphore.clone();
tasks.push(async move {
let _permit = semaphore_moved.acquire().await.unwrap();
let (metric_physical_tables, remaining_tables) =
self.get_table_list(&catalog, &schema).await?;
let table_count = metric_physical_tables.len() + remaining_tables.len();
let output_dir = Path::new(&self.output_dir)
.join(&catalog)
.join(format!("{schema}/"));
tokio::fs::create_dir_all(&output_dir)
let (metric_physical_tables, remaining_tables, views) =
self.get_table_list(&self.catalog, &schema).await?;
let table_count =
metric_physical_tables.len() + remaining_tables.len() + views.len();
let db_dir = self.catalog_path().join(format!("{schema}/"));
tokio::fs::create_dir_all(&db_dir)
.await
.context(FileIoSnafu)?;
let output_file = Path::new(&output_dir).join("create_tables.sql");
let mut file = File::create(output_file).await.context(FileIoSnafu)?;
let file = db_dir.join("create_tables.sql");
let mut file = File::create(file).await.context(FileIoSnafu)?;
for (c, s, t) in metric_physical_tables.into_iter().chain(remaining_tables) {
match self.show_create_table(&c, &s, &t).await {
Err(e) => {
error!(e; r#"Failed to export table "{}"."{}"."{}""#, c, s, t)
}
Ok(create_table) => {
file.write_all(create_table.as_bytes())
.await
.context(FileIoSnafu)?;
}
}
let create_table = self.show_create("TABLE", &c, &s, Some(&t)).await?;
file.write_all(create_table.as_bytes())
.await
.context(FileIoSnafu)?;
}
for (c, s, v) in views {
let create_view = self.show_create("VIEW", &c, &s, Some(&v)).await?;
file.write_all(create_view.as_bytes())
.await
.context(FileIoSnafu)?;
}
info!(
"Finished exporting {catalog}.{schema} with {table_count} table schemas to path: {}",
output_dir.to_string_lossy()
"Finished exporting {}.{schema} with {table_count} table schemas to path: {}",
self.catalog,
db_dir.to_string_lossy()
);
Ok::<(), Error>(())
@@ -332,14 +348,14 @@ impl Export {
.filter(|r| match r {
Ok(_) => true,
Err(e) => {
error!(e; "export job failed");
error!(e; "export schema job failed");
false
}
})
.count();
let elapsed = timer.elapsed();
info!("Success {success}/{db_count} jobs, cost: {:?}", elapsed);
info!("Success {success}/{db_count} jobs, cost: {elapsed:?}");
Ok(())
}
@@ -347,17 +363,15 @@ impl Export {
async fn export_database_data(&self) -> Result<()> {
let timer = Instant::now();
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.iter_db_names().await?;
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_names.len());
for (catalog, schema) in db_names {
let mut tasks = Vec::with_capacity(db_count);
for schema in db_names {
let semaphore_moved = semaphore.clone();
tasks.push(async move {
let _permit = semaphore_moved.acquire().await.unwrap();
let output_dir = Path::new(&self.output_dir)
.join(&catalog)
.join(format!("{schema}/"));
tokio::fs::create_dir_all(&output_dir)
let db_dir = self.catalog_path().join(format!("{schema}/"));
tokio::fs::create_dir_all(&db_dir)
.await
.context(FileIoSnafu)?;
@@ -379,30 +393,31 @@ impl Export {
let sql = format!(
r#"COPY DATABASE "{}"."{}" TO '{}' {};"#,
catalog,
self.catalog,
schema,
output_dir.to_str().unwrap(),
db_dir.to_str().unwrap(),
with_options
);
info!("Executing sql: {sql}");
self.sql(&sql).await?;
self.database_client.sql_in_public(&sql).await?;
info!(
"Finished exporting {catalog}.{schema} data into path: {}",
output_dir.to_string_lossy()
"Finished exporting {}.{schema} data into path: {}",
self.catalog,
db_dir.to_string_lossy()
);
// The export copy from sql
let copy_from_file = output_dir.join("copy_from.sql");
let copy_from_file = db_dir.join("copy_from.sql");
let mut writer =
BufWriter::new(File::create(copy_from_file).await.context(FileIoSnafu)?);
let copy_database_from_sql = format!(
r#"COPY DATABASE "{}"."{}" FROM '{}' WITH (FORMAT='parquet');"#,
catalog,
self.catalog,
schema,
output_dir.to_str().unwrap()
db_dir.to_str().unwrap()
);
writer
.write(copy_database_from_sql.as_bytes())
@@ -410,7 +425,7 @@ impl Export {
.context(FileIoSnafu)?;
writer.flush().await.context(FileIoSnafu)?;
info!("Finished exporting {catalog}.{schema} copy_from.sql");
info!("Finished exporting {}.{schema} copy_from.sql", self.catalog);
Ok::<(), Error>(())
})
@@ -429,20 +444,23 @@ impl Export {
.count();
let elapsed = timer.elapsed();
info!("Success {success}/{db_count} jobs, costs: {:?}", elapsed);
info!("Success {success}/{db_count} jobs, costs: {elapsed:?}");
Ok(())
}
}
#[allow(deprecated)]
#[async_trait]
impl Tool for Export {
async fn do_work(&self) -> Result<()> {
match self.target {
ExportTarget::Schema => self.export_create_table().await,
ExportTarget::Schema => {
self.export_create_database().await?;
self.export_create_table().await
}
ExportTarget::Data => self.export_database_data().await,
ExportTarget::All => {
self.export_create_database().await?;
self.export_create_table().await?;
self.export_database_data().await
}
@@ -450,20 +468,6 @@ impl Tool for Export {
}
}
/// Split at `-`.
fn split_database(database: &str) -> Result<(String, Option<String>)> {
let (catalog, schema) = match database.split_once('-') {
Some((catalog, schema)) => (catalog, schema),
None => (DEFAULT_CATALOG_NAME, database),
};
if schema == "*" {
Ok((catalog.to_string(), None))
} else {
Ok((catalog.to_string(), Some(schema.to_string())))
}
}
#[cfg(test)]
mod tests {
use clap::Parser;
@@ -471,26 +475,10 @@ mod tests {
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_telemetry::logging::LoggingOptions;
use crate::cli::export::split_database;
use crate::error::Result as CmdResult;
use crate::options::GlobalOptions;
use crate::{cli, standalone, App};
#[test]
fn test_split_database() {
let result = split_database("catalog-schema").unwrap();
assert_eq!(result, ("catalog".to_string(), Some("schema".to_string())));
let result = split_database("schema").unwrap();
assert_eq!(result, ("greptime".to_string(), Some("schema".to_string())));
let result = split_database("catalog-*").unwrap();
assert_eq!(result, ("catalog".to_string(), None));
let result = split_database("*").unwrap();
assert_eq!(result, ("greptime".to_string(), None));
}
#[tokio::test(flavor = "multi_thread")]
async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
let output_dir = tempfile::tempdir().unwrap();

src/cmd/src/cli/import.rs (new file, 218 lines)
View File

@@ -0,0 +1,218 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::PathBuf;
use std::sync::Arc;
use async_trait::async_trait;
use clap::{Parser, ValueEnum};
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_telemetry::{error, info, warn};
use snafu::{OptionExt, ResultExt};
use tokio::sync::Semaphore;
use tokio::time::Instant;
use tracing_appender::non_blocking::WorkerGuard;
use crate::cli::database::DatabaseClient;
use crate::cli::{database, Instance, Tool};
use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
#[derive(Debug, Default, Clone, ValueEnum)]
enum ImportTarget {
/// Import all table schemas into the database.
Schema,
/// Import all table data into the database.
Data,
/// Import all table schemas and data at once.
#[default]
All,
}
#[derive(Debug, Default, Parser)]
pub struct ImportCommand {
/// Server address to connect
#[clap(long)]
addr: String,
/// Directory of the data. E.g.: /tmp/greptimedb-backup
#[clap(long)]
input_dir: String,
/// The name of the catalog to import.
#[clap(long, default_value = "greptime-*")]
database: String,
/// Parallelism of the import.
#[clap(long, short = 'j', default_value = "1")]
import_jobs: usize,
/// Max retry times for each job.
#[clap(long, default_value = "3")]
max_retry: usize,
/// Things to export
#[clap(long, short = 't', value_enum, default_value = "all")]
target: ImportTarget,
/// The basic authentication for connecting to the server
#[clap(long)]
auth_basic: Option<String>,
}
impl ImportCommand {
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
let (catalog, schema) = database::split_database(&self.database)?;
let database_client =
DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
Ok(Instance::new(
Box::new(Import {
catalog,
schema,
database_client,
input_dir: self.input_dir.clone(),
parallelism: self.import_jobs,
target: self.target.clone(),
}),
guard,
))
}
}
pub struct Import {
catalog: String,
schema: Option<String>,
database_client: DatabaseClient,
input_dir: String,
parallelism: usize,
target: ImportTarget,
}
impl Import {
async fn import_create_table(&self) -> Result<()> {
// Use the default db to create the other dbs
self.do_sql_job("create_database.sql", Some(DEFAULT_SCHEMA_NAME))
.await?;
self.do_sql_job("create_tables.sql", None).await
}
async fn import_database_data(&self) -> Result<()> {
self.do_sql_job("copy_from.sql", None).await
}
async fn do_sql_job(&self, filename: &str, exec_db: Option<&str>) -> Result<()> {
let timer = Instant::now();
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_count);
for schema in db_names {
let semaphore_moved = semaphore.clone();
tasks.push(async move {
let _permit = semaphore_moved.acquire().await.unwrap();
let database_input_dir = self.catalog_path().join(&schema);
let sql_file = database_input_dir.join(filename);
let sql = tokio::fs::read_to_string(sql_file)
.await
.context(FileIoSnafu)?;
if sql.is_empty() {
info!("Empty `{filename}` {database_input_dir:?}");
} else {
let db = exec_db.unwrap_or(&schema);
self.database_client.sql(&sql, db).await?;
info!("Imported `{filename}` for database {schema}");
}
Ok::<(), Error>(())
})
}
let success = futures::future::join_all(tasks)
.await
.into_iter()
.filter(|r| match r {
Ok(_) => true,
Err(e) => {
error!(e; "import {filename} job failed");
false
}
})
.count();
let elapsed = timer.elapsed();
info!("Success {success}/{db_count} `{filename}` jobs, cost: {elapsed:?}");
Ok(())
}
fn catalog_path(&self) -> PathBuf {
PathBuf::from(&self.input_dir).join(&self.catalog)
}
async fn get_db_names(&self) -> Result<Vec<String>> {
let db_names = self.all_db_names().await?;
let Some(schema) = &self.schema else {
return Ok(db_names);
};
// Check if the schema exists
db_names
.into_iter()
.find(|db_name| db_name.to_lowercase() == schema.to_lowercase())
.map(|name| vec![name])
.context(SchemaNotFoundSnafu {
catalog: &self.catalog,
schema,
})
}
// Get all database names in the input directory.
// The directory structure should be like:
// /tmp/greptimedb-backup
// ├── greptime-1
// │ ├── db1
// │ └── db2
async fn all_db_names(&self) -> Result<Vec<String>> {
let mut db_names = vec![];
let path = self.catalog_path();
let mut entries = tokio::fs::read_dir(path).await.context(FileIoSnafu)?;
while let Some(entry) = entries.next_entry().await.context(FileIoSnafu)? {
let path = entry.path();
if path.is_dir() {
let db_name = match path.file_name() {
Some(name) => name.to_string_lossy().to_string(),
None => {
warn!("Failed to get the file name of {:?}", path);
continue;
}
};
db_names.push(db_name);
}
}
Ok(db_names)
}
}
#[async_trait]
impl Tool for Import {
async fn do_work(&self) -> Result<()> {
match self.target {
ImportTarget::Schema => self.import_create_table().await,
ImportTarget::Data => self.import_database_data().await,
ImportTarget::All => {
self.import_create_table().await?;
self.import_database_data().await
}
}
}
}

View File

@@ -267,7 +267,7 @@ impl StartCommand {
&opts.component.tracing,
opts.component.node_id.map(|x| x.to_string()),
);
log_versions(version(), short_version());
log_versions(version(), short_version(), APP_NAME);
info!("Datanode start command: {:#?}", self);
info!("Datanode options: {:#?}", opts);

View File

@@ -31,13 +31,6 @@ pub enum Error {
source: common_meta::error::Error,
},
#[snafu(display("Failed to iter stream"))]
IterStream {
#[snafu(implicit)]
location: Location,
source: common_meta::error::Error,
},
#[snafu(display("Failed to init DDL manager"))]
InitDdlManager {
#[snafu(implicit)]
@@ -237,13 +230,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to start catalog manager"))]
StartCatalogManager {
#[snafu(implicit)]
location: Location,
source: catalog::error::Error,
},
#[snafu(display("Failed to connect to Etcd at {etcd_addr}"))]
ConnectEtcd {
etcd_addr: String,
@@ -253,14 +239,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to connect server at {addr}"))]
ConnectServer {
addr: String,
source: client::error::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to serde json"))]
SerdeJson {
#[snafu(source)]
@@ -278,12 +256,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Expect data from output, but got another thing"))]
NotDataFromOutput {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Empty result from output"))]
EmptyResult {
#[snafu(implicit)]
@@ -346,13 +318,12 @@ pub enum Error {
source: meta_client::error::Error,
},
#[snafu(display("Tonic transport error: {error:?} with msg: {msg:?}"))]
TonicTransport {
#[snafu(display("Cannot find schema {schema} in catalog {catalog}"))]
SchemaNotFound {
catalog: String,
schema: String,
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: tonic::transport::Error,
msg: Option<String>,
},
}
@@ -370,18 +341,16 @@ impl ErrorExt for Error {
Error::BuildMetaServer { source, .. } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::IterStream { source, .. }
| Error::InitMetadata { source, .. }
| Error::InitDdlManager { source, .. } => source.status_code(),
Error::InitMetadata { source, .. } | Error::InitDdlManager { source, .. } => {
source.status_code()
}
Error::ConnectServer { source, .. } => source.status_code(),
Error::MissingConfig { .. }
| Error::LoadLayeredConfig { .. }
| Error::IllegalConfig { .. }
| Error::InvalidReplCommand { .. }
| Error::InitTimezone { .. }
| Error::ConnectEtcd { .. }
| Error::NotDataFromOutput { .. }
| Error::CreateDir { .. }
| Error::EmptyResult { .. } => StatusCode::InvalidArguments,
@@ -399,7 +368,6 @@ impl ErrorExt for Error {
source.status_code()
}
Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
Error::StartCatalogManager { source, .. } => source.status_code(),
Error::SerdeJson { .. } | Error::FileIo { .. } | Error::SpawnThread { .. } => {
StatusCode::Unexpected
@@ -414,7 +382,7 @@ impl ErrorExt for Error {
source.status_code()
}
Error::MetaClientInit { source, .. } => source.status_code(),
Error::TonicTransport { .. } => StatusCode::Internal,
Error::SchemaNotFound { .. } => StatusCode::DatabaseNotFound,
}
}

View File

@@ -215,7 +215,7 @@ impl StartCommand {
&opts.component.tracing,
opts.component.node_id.map(|x| x.to_string()),
);
log_versions(version(), short_version());
log_versions(version(), short_version(), APP_NAME);
info!("Flownode start command: {:#?}", self);
info!("Flownode options: {:#?}", opts);

View File

@@ -261,7 +261,7 @@ impl StartCommand {
&opts.component.tracing,
opts.component.node_id.clone(),
);
log_versions(version(), short_version());
log_versions(version(), short_version(), APP_NAME);
info!("Frontend start command: {:#?}", self);
info!("Frontend options: {:#?}", opts);

View File

@@ -30,7 +30,7 @@ pub mod standalone;
lazy_static::lazy_static! {
static ref APP_VERSION: prometheus::IntGaugeVec =
prometheus::register_int_gauge_vec!("greptime_app_version", "app version", &["version", "short_version"]).unwrap();
prometheus::register_int_gauge_vec!("greptime_app_version", "app version", &["version", "short_version", "app"]).unwrap();
}
#[async_trait]
@@ -76,10 +76,10 @@ pub trait App: Send {
/// Log the versions of the application, and the arguments passed to the cli.
/// `version` should be the same as the output of cli "--version";
/// and the `short_version` is the short version of the codes, often consist of git branch and commit.
pub fn log_versions(version: &str, short_version: &str) {
pub fn log_versions(version: &str, short_version: &str, app: &str) {
// Report app version as gauge.
APP_VERSION
.with_label_values(&[env!("CARGO_PKG_VERSION"), short_version])
.with_label_values(&[env!("CARGO_PKG_VERSION"), short_version, app])
.inc();
// Log version and argument flags.

View File

@@ -244,7 +244,7 @@ impl StartCommand {
&opts.component.tracing,
None,
);
log_versions(version(), short_version());
log_versions(version(), short_version(), APP_NAME);
info!("Metasrv start command: {:#?}", self);
info!("Metasrv options: {:#?}", opts);

View File

@@ -141,6 +141,8 @@ pub struct StandaloneOptions {
pub region_engine: Vec<RegionEngineConfig>,
pub export_metrics: ExportMetricsOption,
pub tracing: TracingOptions,
pub init_regions_in_background: bool,
pub init_regions_parallelism: usize,
}
impl Default for StandaloneOptions {
@@ -168,6 +170,8 @@ impl Default for StandaloneOptions {
RegionEngineConfig::File(FileEngineConfig::default()),
],
tracing: TracingOptions::default(),
init_regions_in_background: false,
init_regions_parallelism: 16,
}
}
}
@@ -178,6 +182,16 @@ impl Configurable for StandaloneOptions {
}
}
/// The [`StandaloneOptions`] is only defined in the cmd crate,
/// and we don't want to make `frontend` depend on it, so we impl [`Into`]
/// rather than [`From`].
#[allow(clippy::from_over_into)]
impl Into<FrontendOptions> for StandaloneOptions {
fn into(self) -> FrontendOptions {
self.frontend_options()
}
}
impl StandaloneOptions {
pub fn frontend_options(&self) -> FrontendOptions {
let cloned_opts = self.clone();
@@ -208,6 +222,9 @@ impl StandaloneOptions {
storage: cloned_opts.storage,
region_engine: cloned_opts.region_engine,
grpc: cloned_opts.grpc,
init_regions_in_background: cloned_opts.init_regions_in_background,
init_regions_parallelism: cloned_opts.init_regions_parallelism,
mode: Mode::Standalone,
..Default::default()
}
}
@@ -415,7 +432,7 @@ impl StartCommand {
&opts.component.tracing,
None,
);
log_versions(version(), short_version());
log_versions(version(), short_version(), APP_NAME);
info!("Standalone start command: {:#?}", self);
info!("Standalone options: {opts:#?}");
@@ -510,7 +527,7 @@ impl StartCommand {
.build(),
);
let wal_options_allocator = Arc::new(WalOptionsAllocator::new(
opts.wal.into(),
opts.wal.clone().into(),
kv_backend.clone(),
));
let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
@@ -533,7 +550,7 @@ impl StartCommand {
.await?;
let mut frontend = FrontendBuilder::new(
fe_opts.clone(),
fe_opts,
kv_backend.clone(),
layered_cache_registry.clone(),
catalog_manager.clone(),
@@ -561,7 +578,7 @@ impl StartCommand {
let (tx, _rx) = broadcast::channel(1);
let servers = Services::new(fe_opts, Arc::new(frontend.clone()), plugins)
let servers = Services::new(opts, Arc::new(frontend.clone()), plugins)
.build()
.await
.context(StartFrontendSnafu)?;

View File

@@ -81,7 +81,9 @@ fn test_load_datanode_example_config() {
sst_meta_cache_size: ReadableSize::mb(128),
vector_cache_size: ReadableSize::mb(512),
page_cache_size: ReadableSize::mb(512),
selector_result_cache_size: ReadableSize::mb(512),
max_background_jobs: 4,
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
..Default::default()
}),
RegionEngineConfig::File(EngineConfig {}),
@@ -217,7 +219,9 @@ fn test_load_standalone_example_config() {
sst_meta_cache_size: ReadableSize::mb(128),
vector_cache_size: ReadableSize::mb(512),
page_cache_size: ReadableSize::mb(512),
selector_result_cache_size: ReadableSize::mb(512),
max_background_jobs: 4,
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
..Default::default()
}),
RegionEngineConfig::File(EngineConfig {}),

View File

@@ -1,46 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Invalid full table name: {}", table_name))]
InvalidFullTableName {
table_name: String,
#[snafu(implicit)]
location: Location,
},
}
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::InvalidFullTableName { .. } => StatusCode::Unexpected,
}
}
fn as_any(&self) -> &dyn Any {
self
}
}
pub type Result<T> = std::result::Result<T, Error>;

View File

@@ -15,7 +15,6 @@
use consts::DEFAULT_CATALOG_NAME;
pub mod consts;
pub mod error;
#[inline]
pub fn format_schema_name(catalog: &str, schema: &str) -> String {
@@ -48,19 +47,19 @@ pub fn build_db_string(catalog: &str, schema: &str) -> String {
/// The database name may come from different sources:
///
/// - MySQL `schema` name in MySQL protocol login request: it's optional and user
/// and switch database using `USE` command
/// and switch database using `USE` command
/// - Postgres `database` parameter in Postgres wire protocol, required
/// - HTTP RESTful API: the database parameter, optional
/// - gRPC: the dbname field in header, optional but has a higher priority than
/// original catalog/schema
/// original catalog/schema
///
/// When database name is provided, we attempt to parse catalog and schema from
/// it. We assume the format `[<catalog>-]<schema>`:
///
/// - If `[<catalog>-]` part is not provided, we use whole database name as
/// schema name
/// schema name
/// - if `[<catalog>-]` is provided, we split database name with `-` and use
/// `<catalog>` and `<schema>`.
/// `<catalog>` and `<schema>`.
pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (String, String) {
match parse_optional_catalog_and_schema_from_db_string(db) {
(Some(catalog), schema) => (catalog, schema),

View File

@@ -13,6 +13,7 @@
// limitations under the License.
use object_store::services::Fs;
use object_store::util::DefaultLoggingInterceptor;
use object_store::ObjectStore;
use snafu::ResultExt;
@@ -22,13 +23,9 @@ pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
let builder = Fs::default();
let object_store = ObjectStore::new(builder.root(root))
.context(BuildBackendSnafu)?
.layer(
object_store::layers::LoggingLayer::default()
// Print the expected error only in DEBUG level.
// See https://docs.rs/opendal/latest/opendal/layers/struct.LoggingLayer.html#method.with_error_level
.with_error_level(Some("debug"))
.expect("input error level must be valid"),
)
.layer(object_store::layers::LoggingLayer::new(
DefaultLoggingInterceptor,
))
.layer(object_store::layers::TracingLayer)
.layer(object_store::layers::PrometheusMetricsLayer::new(true))
.finish();

View File

@@ -15,6 +15,7 @@
use std::collections::HashMap;
use object_store::services::S3;
use object_store::util::DefaultLoggingInterceptor;
use object_store::ObjectStore;
use snafu::ResultExt;
@@ -84,13 +85,9 @@ pub fn build_s3_backend(
// TODO(weny): Consider finding a better way to eliminate duplicate code.
Ok(ObjectStore::new(builder)
.context(error::BuildBackendSnafu)?
.layer(
object_store::layers::LoggingLayer::default()
// Print the expected error only in DEBUG level.
// See https://docs.rs/opendal/latest/opendal/layers/struct.LoggingLayer.html#method.with_error_level
.with_error_level(Some("debug"))
.expect("input error level must be valid"),
)
.layer(object_store::layers::LoggingLayer::new(
DefaultLoggingInterceptor,
))
.layer(object_store::layers::TracingLayer)
.layer(object_store::layers::PrometheusMetricsLayer::new(true))
.finish())

View File

@@ -7,6 +7,10 @@ license.workspace = true
[lints]
workspace = true
[features]
default = ["geo"]
geo = ["geohash", "h3o"]
[dependencies]
api.workspace = true
arc-swap = "1.0"
@@ -23,6 +27,8 @@ common-time.workspace = true
common-version.workspace = true
datafusion.workspace = true
datatypes.workspace = true
geohash = { version = "0.13", optional = true }
h3o = { version = "0.6", optional = true }
num = "0.4"
num-traits = "0.2"
once_cell.workspace = true
@@ -39,3 +45,4 @@ table.workspace = true
[dev-dependencies]
ron = "0.7"
serde = { version = "1.0", features = ["derive"] }
tokio.workspace = true

View File

@@ -110,7 +110,7 @@ mod test {
use session::context::QueryContext;
use super::*;
use crate::function::{Function, FunctionContext};
use crate::function::{AsyncFunction, FunctionContext};
#[test]
fn test_flush_flow_metadata() {
@@ -130,8 +130,8 @@ mod test {
);
}
#[test]
fn test_missing_flow_service() {
#[tokio::test]
async fn test_missing_flow_service() {
let f = FlushFlowFunction;
let args = vec!["flow_name"];
@@ -140,7 +140,7 @@ mod test {
.map(|arg| Arc::new(StringVector::from_slice(&[arg])) as _)
.collect::<Vec<_>>();
let result = f.eval(FunctionContext::default(), &args).unwrap_err();
let result = f.eval(FunctionContext::default(), &args).await.unwrap_err();
assert_eq!(
"Missing FlowServiceHandler, not expected",
result.to_string()

View File

@@ -32,7 +32,7 @@ pub struct FunctionContext {
impl FunctionContext {
/// Create a mock [`FunctionContext`] for test.
#[cfg(any(test, feature = "testing"))]
#[cfg(test)]
pub fn mock() -> Self {
Self {
query_ctx: QueryContextBuilder::default().build().into(),
@@ -56,8 +56,10 @@ pub trait Function: fmt::Display + Sync + Send {
/// Returns the name of the function, should be unique.
fn name(&self) -> &str;
/// The returned data type of function execution.
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType>;
/// The signature of function.
fn signature(&self) -> Signature;
/// Evaluate the function, e.g. run/execute the function.
@@ -65,3 +67,22 @@ pub trait Function: fmt::Display + Sync + Send {
}
pub type FunctionRef = Arc<dyn Function>;
/// Async Scalar function trait
#[async_trait::async_trait]
pub trait AsyncFunction: fmt::Display + Sync + Send {
/// Returns the name of the function, should be unique.
fn name(&self) -> &str;
/// The returned data type of function execution.
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType>;
/// The signature of function.
fn signature(&self) -> Signature;
/// Evaluate the function, e.g. run/execute the function.
/// TODO(dennis): simplify the signature and refactor all the admin functions.
async fn eval(&self, _func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef>;
}
pub type AsyncFunctionRef = Arc<dyn AsyncFunction>;

View File
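The file above introduces an `AsyncFunction` trait alongside `Function`, so admin functions that call remote services can await them instead of blocking a thread. A minimal sketch of what an implementor of such a trait can look like, using simplified stand-in types and assuming the `async-trait` and `tokio` crates as dependencies; everything except the `AsyncFunction`/`eval` shape is illustrative:

```rust
use std::sync::Arc;

use async_trait::async_trait;

// Simplified stand-ins for the crate's Result and VectorRef types.
type Result<T> = std::result::Result<T, String>;
type VectorRef = Arc<Vec<String>>;

#[async_trait]
pub trait AsyncFunction: Send + Sync {
    fn name(&self) -> &str;
    // The real trait also declares `return_type` and `signature`; omitted here.
    async fn eval(&self, columns: &[VectorRef]) -> Result<VectorRef>;
}

struct EchoFunction;

#[async_trait]
impl AsyncFunction for EchoFunction {
    fn name(&self) -> &str {
        "echo"
    }

    async fn eval(&self, columns: &[VectorRef]) -> Result<VectorRef> {
        // An async eval can await I/O (e.g. a procedure service call)
        // directly instead of spawning a thread and blocking on a runtime.
        Ok(columns
            .first()
            .cloned()
            .unwrap_or_else(|| Arc::new(Vec::new())))
    }
}

#[tokio::main]
async fn main() -> Result<()> {
    let f = EchoFunction;
    let out = f.eval(&[Arc::new(vec!["hello".to_string()])]).await?;
    println!("{} -> {:?}", f.name(), out);
    Ok(())
}
```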

@@ -18,7 +18,7 @@ use std::sync::{Arc, RwLock};
use once_cell::sync::Lazy;
use crate::function::FunctionRef;
use crate::function::{AsyncFunctionRef, FunctionRef};
use crate::scalars::aggregate::{AggregateFunctionMetaRef, AggregateFunctions};
use crate::scalars::date::DateFunction;
use crate::scalars::expression::ExpressionFunction;
@@ -32,6 +32,7 @@ use crate::table::TableFunction;
#[derive(Default)]
pub struct FunctionRegistry {
functions: RwLock<HashMap<String, FunctionRef>>,
async_functions: RwLock<HashMap<String, AsyncFunctionRef>>,
aggregate_functions: RwLock<HashMap<String, AggregateFunctionMetaRef>>,
}
@@ -44,6 +45,27 @@ impl FunctionRegistry {
.insert(func.name().to_string(), func);
}
pub fn register_async(&self, func: AsyncFunctionRef) {
let _ = self
.async_functions
.write()
.unwrap()
.insert(func.name().to_string(), func);
}
pub fn get_async_function(&self, name: &str) -> Option<AsyncFunctionRef> {
self.async_functions.read().unwrap().get(name).cloned()
}
pub fn async_functions(&self) -> Vec<AsyncFunctionRef> {
self.async_functions
.read()
.unwrap()
.values()
.cloned()
.collect()
}
pub fn register_aggregate_function(&self, func: AggregateFunctionMetaRef) {
let _ = self
.aggregate_functions
@@ -94,6 +116,10 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
SystemFunction::register(&function_registry);
TableFunction::register(&function_registry);
// Geo functions
#[cfg(feature = "geo")]
crate::scalars::geo::GeoFunctions::register(&function_registry);
Arc::new(function_registry)
});

View File
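The registry change above keeps asynchronous functions in a separate map so they can be registered and resolved by name independently of the synchronous ones. A trimmed-down sketch of that shape (stand-in trait and types, not the crate's real definitions):

```rust
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

// Stand-in for the crate's AsyncFunctionRef.
trait AsyncFunction: Send + Sync {
    fn name(&self) -> &str;
}
type AsyncFunctionRef = Arc<dyn AsyncFunction>;

#[derive(Default)]
struct FunctionRegistry {
    async_functions: RwLock<HashMap<String, AsyncFunctionRef>>,
}

impl FunctionRegistry {
    fn register_async(&self, func: AsyncFunctionRef) {
        // Keyed by name; a later registration with the same name wins.
        let _ = self
            .async_functions
            .write()
            .unwrap()
            .insert(func.name().to_string(), func);
    }

    fn get_async_function(&self, name: &str) -> Option<AsyncFunctionRef> {
        self.async_functions.read().unwrap().get(name).cloned()
    }
}

struct FlushFlow;
impl AsyncFunction for FlushFlow {
    fn name(&self) -> &str {
        "flush_flow"
    }
}

fn main() {
    let registry = FunctionRegistry::default();
    registry.register_async(Arc::new(FlushFlow));
    assert!(registry.get_async_function("flush_flow").is_some());
    assert!(registry.get_async_function("unknown").is_none());
}
```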

@@ -15,6 +15,8 @@
pub mod aggregate;
pub(crate) mod date;
pub mod expression;
#[cfg(feature = "geo")]
pub mod geo;
pub mod matches;
pub mod math;
pub mod numpy;

View File

@@ -75,7 +75,7 @@ where
// to keep the not_greater length == floor+1
// so to ensure the peek of the not_greater is array[floor]
// and the peek of the greater is array[floor+1]
let p = if let Some(p) = self.p { p } else { 0.0_f64 };
let p = self.p.unwrap_or(0.0_f64);
let floor = (((self.n - 1) as f64) * p / (100_f64)).floor();
if value <= *self.not_greater.peek().unwrap() {
self.not_greater.push(value);

View File
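The percentile change above replaces an `if let Some(..) .. else ..` with `Option::unwrap_or`, which is behaviorally identical. A quick standalone check of the equivalence:

```rust
fn main() {
    for p in [Some(25.0_f64), None] {
        // Old form: explicit if-let with a default in the else branch.
        let old = if let Some(p) = p { p } else { 0.0_f64 };
        // New form: unwrap_or with the same default.
        let new = p.unwrap_or(0.0_f64);
        assert_eq!(old, new);
    }
    println!("unwrap_or(0.0) behaves like the if-let form for Some and None");
}
```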

@@ -0,0 +1,31 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
mod geohash;
mod h3;
use geohash::GeohashFunction;
use h3::H3Function;
use crate::function_registry::FunctionRegistry;
pub(crate) struct GeoFunctions;
impl GeoFunctions {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(GeohashFunction));
registry.register(Arc::new(H3Function));
}
}

View File

@@ -0,0 +1,135 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use common_error::ext::{BoxedError, PlainError};
use common_error::status_code::StatusCode;
use common_query::error::{self, InvalidFuncArgsSnafu, Result};
use common_query::prelude::{Signature, TypeSignature};
use datafusion::logical_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::value::Value;
use datatypes::vectors::{MutableVector, StringVectorBuilder, VectorRef};
use geohash::Coord;
use snafu::{ensure, ResultExt};
use crate::function::{Function, FunctionContext};
/// Function that return geohash string for a given geospatial coordinate.
#[derive(Clone, Debug, Default)]
pub struct GeohashFunction;
const NAME: &str = "geohash";
impl Function for GeohashFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::string_datatype())
}
fn signature(&self) -> Signature {
let mut signatures = Vec::new();
for coord_type in &[
ConcreteDataType::float32_datatype(),
ConcreteDataType::float64_datatype(),
] {
for resolution_type in &[
ConcreteDataType::int8_datatype(),
ConcreteDataType::int16_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::uint8_datatype(),
ConcreteDataType::uint16_datatype(),
ConcreteDataType::uint32_datatype(),
ConcreteDataType::uint64_datatype(),
] {
signatures.push(TypeSignature::Exact(vec![
// latitude
coord_type.clone(),
// longitude
coord_type.clone(),
// resolution
resolution_type.clone(),
]));
}
}
Signature::one_of(signatures, Volatility::Stable)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 3,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect 3, provided : {}",
columns.len()
),
}
);
let lat_vec = &columns[0];
let lon_vec = &columns[1];
let resolution_vec = &columns[2];
let size = lat_vec.len();
let mut results = StringVectorBuilder::with_capacity(size);
for i in 0..size {
let lat = lat_vec.get(i).as_f64_lossy();
let lon = lon_vec.get(i).as_f64_lossy();
let r = match resolution_vec.get(i) {
Value::Int8(v) => v as usize,
Value::Int16(v) => v as usize,
Value::Int32(v) => v as usize,
Value::Int64(v) => v as usize,
Value::UInt8(v) => v as usize,
Value::UInt16(v) => v as usize,
Value::UInt32(v) => v as usize,
Value::UInt64(v) => v as usize,
_ => unreachable!(),
};
let result = match (lat, lon) {
(Some(lat), Some(lon)) => {
let coord = Coord { x: lon, y: lat };
let encoded = geohash::encode(coord, r)
.map_err(|e| {
BoxedError::new(PlainError::new(
format!("Geohash error: {}", e),
StatusCode::EngineExecuteQuery,
))
})
.context(error::ExecuteSnafu)?;
Some(encoded)
}
_ => None,
};
results.push(result.as_deref());
}
Ok(results.to_vector())
}
}
impl fmt::Display for GeohashFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", NAME)
}
}

View File
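The `geohash` scalar function above is a thin wrapper over the `geohash` crate: longitude and latitude become a `Coord { x, y }` and the third argument becomes the encoding length passed to `geohash::encode`. A standalone sketch of that underlying call, assuming the `geohash = "0.13"` dependency added in the Cargo.toml change above; the sample coordinates are illustrative:

```rust
use geohash::Coord;

fn main() -> Result<(), String> {
    // Longitude maps to x and latitude to y, as in the eval loop above.
    let coord = Coord {
        x: -122.3493,
        y: 47.6205,
    };

    // The third SQL argument becomes the geohash string length.
    let encoded = geohash::encode(coord, 9).map_err(|e| e.to_string())?;
    println!("geohash(47.6205, -122.3493, 9) = {encoded}");
    Ok(())
}
```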

@@ -0,0 +1,145 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use common_error::ext::{BoxedError, PlainError};
use common_error::status_code::StatusCode;
use common_query::error::{self, InvalidFuncArgsSnafu, Result};
use common_query::prelude::{Signature, TypeSignature};
use datafusion::logical_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::value::Value;
use datatypes::vectors::{MutableVector, StringVectorBuilder, VectorRef};
use h3o::{LatLng, Resolution};
use snafu::{ensure, ResultExt};
use crate::function::{Function, FunctionContext};
/// Function that returns [h3] encoding string for a given geospatial coordinate.
///
/// [h3]: https://h3geo.org/
#[derive(Clone, Debug, Default)]
pub struct H3Function;
const NAME: &str = "h3";
impl Function for H3Function {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::string_datatype())
}
fn signature(&self) -> Signature {
let mut signatures = Vec::new();
for coord_type in &[
ConcreteDataType::float32_datatype(),
ConcreteDataType::float64_datatype(),
] {
for resolution_type in &[
ConcreteDataType::int8_datatype(),
ConcreteDataType::int16_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::uint8_datatype(),
ConcreteDataType::uint16_datatype(),
ConcreteDataType::uint32_datatype(),
ConcreteDataType::uint64_datatype(),
] {
signatures.push(TypeSignature::Exact(vec![
// latitude
coord_type.clone(),
// longitude
coord_type.clone(),
// resolution
resolution_type.clone(),
]));
}
}
Signature::one_of(signatures, Volatility::Stable)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 3,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect 3, provided : {}",
columns.len()
),
}
);
let lat_vec = &columns[0];
let lon_vec = &columns[1];
let resolution_vec = &columns[2];
let size = lat_vec.len();
let mut results = StringVectorBuilder::with_capacity(size);
for i in 0..size {
let lat = lat_vec.get(i).as_f64_lossy();
let lon = lon_vec.get(i).as_f64_lossy();
let r = match resolution_vec.get(i) {
Value::Int8(v) => v as u8,
Value::Int16(v) => v as u8,
Value::Int32(v) => v as u8,
Value::Int64(v) => v as u8,
Value::UInt8(v) => v,
Value::UInt16(v) => v as u8,
Value::UInt32(v) => v as u8,
Value::UInt64(v) => v as u8,
_ => unreachable!(),
};
let result = match (lat, lon) {
(Some(lat), Some(lon)) => {
let coord = LatLng::new(lat, lon)
.map_err(|e| {
BoxedError::new(PlainError::new(
format!("H3 error: {}", e),
StatusCode::EngineExecuteQuery,
))
})
.context(error::ExecuteSnafu)?;
let r = Resolution::try_from(r)
.map_err(|e| {
BoxedError::new(PlainError::new(
format!("H3 error: {}", e),
StatusCode::EngineExecuteQuery,
))
})
.context(error::ExecuteSnafu)?;
let encoded = coord.to_cell(r).to_string();
Some(encoded)
}
_ => None,
};
results.push(result.as_deref());
}
Ok(results.to_vector())
}
}
impl fmt::Display for H3Function {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", NAME)
}
}

View File
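Likewise, `h3` delegates to the `h3o` crate: `LatLng::new` validates the coordinate, `Resolution::try_from` converts the integer argument, and the resulting cell index is rendered as a string. A standalone sketch of those calls, assuming the `h3o = "0.6"` dependency above; the sample values are illustrative:

```rust
use h3o::{LatLng, Resolution};

fn main() -> Result<(), String> {
    // LatLng::new validates the coordinate; the SQL function surfaces a
    // failure here as an execution error.
    let coord = LatLng::new(47.6205, -122.3493).map_err(|e| e.to_string())?;

    // The integer argument (0..=15) is converted into an H3 resolution.
    let resolution = Resolution::try_from(9u8).map_err(|e| e.to_string())?;

    let cell = coord.to_cell(resolution);
    println!("h3(47.6205, -122.3493, 9) = {cell}");
    Ok(())
}
```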

@@ -28,7 +28,7 @@ pub struct FunctionState {
impl FunctionState {
/// Create a mock [`FunctionState`] for test.
#[cfg(any(test, feature = "testing"))]
#[cfg(test)]
pub fn mock() -> Self {
use std::sync::Arc;

View File

@@ -22,7 +22,7 @@ mod version;
use std::sync::Arc;
use build::BuildFunction;
use database::DatabaseFunction;
use database::{CurrentSchemaFunction, DatabaseFunction};
use pg_catalog::PGCatalogFunction;
use procedure_state::ProcedureStateFunction;
use timezone::TimezoneFunction;
@@ -37,8 +37,9 @@ impl SystemFunction {
registry.register(Arc::new(BuildFunction));
registry.register(Arc::new(VersionFunction));
registry.register(Arc::new(DatabaseFunction));
registry.register(Arc::new(CurrentSchemaFunction));
registry.register(Arc::new(TimezoneFunction));
registry.register(Arc::new(ProcedureStateFunction));
registry.register_async(Arc::new(ProcedureStateFunction));
PGCatalogFunction::register(registry);
}
}

View File

@@ -26,11 +26,35 @@ use crate::function::{Function, FunctionContext};
#[derive(Clone, Debug, Default)]
pub struct DatabaseFunction;
const NAME: &str = "database";
#[derive(Clone, Debug, Default)]
pub struct CurrentSchemaFunction;
const DATABASE_FUNCTION_NAME: &str = "database";
const CURRENT_SCHEMA_FUNCTION_NAME: &str = "current_schema";
impl Function for DatabaseFunction {
fn name(&self) -> &str {
NAME
DATABASE_FUNCTION_NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::string_datatype())
}
fn signature(&self) -> Signature {
Signature::uniform(0, vec![], Volatility::Immutable)
}
fn eval(&self, func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
let db = func_ctx.query_ctx.current_schema();
Ok(Arc::new(StringVector::from_slice(&[&db])) as _)
}
}
impl Function for CurrentSchemaFunction {
fn name(&self) -> &str {
CURRENT_SCHEMA_FUNCTION_NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
@@ -54,6 +78,12 @@ impl fmt::Display for DatabaseFunction {
}
}
impl fmt::Display for CurrentSchemaFunction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "CURRENT_SCHEMA")
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;

View File

@@ -14,11 +14,13 @@
mod pg_get_userbyid;
mod table_is_visible;
mod version;
use std::sync::Arc;
use pg_get_userbyid::PGGetUserByIdFunction;
use table_is_visible::PGTableIsVisibleFunction;
use version::PGVersionFunction;
use crate::function_registry::FunctionRegistry;
@@ -35,5 +37,6 @@ impl PGCatalogFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(PGTableIsVisibleFunction));
registry.register(Arc::new(PGGetUserByIdFunction));
registry.register(Arc::new(PGVersionFunction));
}
}

View File

@@ -0,0 +1,54 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use std::{env, fmt};
use common_query::error::Result;
use common_query::prelude::{Signature, Volatility};
use datatypes::data_type::ConcreteDataType;
use datatypes::vectors::{StringVector, VectorRef};
use crate::function::{Function, FunctionContext};
#[derive(Clone, Debug, Default)]
pub(crate) struct PGVersionFunction;
impl fmt::Display for PGVersionFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, crate::pg_catalog_func_fullname!("VERSION"))
}
}
impl Function for PGVersionFunction {
fn name(&self) -> &str {
crate::pg_catalog_func_fullname!("version")
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::string_datatype())
}
fn signature(&self) -> Signature {
Signature::exact(vec![], Volatility::Immutable)
}
fn eval(&self, _func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
let result = StringVector::from(vec![format!(
"PostgreSQL 16.3 GreptimeDB {}",
env!("CARGO_PKG_VERSION")
)]);
Ok(Arc::new(result))
}
}

View File

@@ -96,7 +96,7 @@ mod tests {
use datatypes::vectors::StringVector;
use super::*;
use crate::function::{Function, FunctionContext};
use crate::function::{AsyncFunction, FunctionContext};
#[test]
fn test_procedure_state_misc() {
@@ -114,8 +114,8 @@ mod tests {
));
}
#[test]
fn test_missing_procedure_service() {
#[tokio::test]
async fn test_missing_procedure_service() {
let f = ProcedureStateFunction;
let args = vec!["pid"];
@@ -125,15 +125,15 @@ mod tests {
.map(|arg| Arc::new(StringVector::from_slice(&[arg])) as _)
.collect::<Vec<_>>();
let result = f.eval(FunctionContext::default(), &args).unwrap_err();
let result = f.eval(FunctionContext::default(), &args).await.unwrap_err();
assert_eq!(
"Missing ProcedureServiceHandler, not expected",
result.to_string()
);
}
#[test]
fn test_procedure_state() {
#[tokio::test]
async fn test_procedure_state() {
let f = ProcedureStateFunction;
let args = vec!["pid"];
@@ -143,7 +143,7 @@ mod tests {
.map(|arg| Arc::new(StringVector::from_slice(&[arg])) as _)
.collect::<Vec<_>>();
let result = f.eval(FunctionContext::mock(), &args).unwrap();
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
let expect: VectorRef = Arc::new(StringVector::from(vec![
"{\"status\":\"Done\",\"error\":\"OK\"}",

View File

@@ -31,11 +31,11 @@ pub(crate) struct TableFunction;
impl TableFunction {
/// Register all table functions to [`FunctionRegistry`].
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(MigrateRegionFunction));
registry.register(Arc::new(FlushRegionFunction));
registry.register(Arc::new(CompactRegionFunction));
registry.register(Arc::new(FlushTableFunction));
registry.register(Arc::new(CompactTableFunction));
registry.register(Arc::new(FlushFlowFunction));
registry.register_async(Arc::new(MigrateRegionFunction));
registry.register_async(Arc::new(FlushRegionFunction));
registry.register_async(Arc::new(CompactRegionFunction));
registry.register_async(Arc::new(FlushTableFunction));
registry.register_async(Arc::new(CompactTableFunction));
registry.register_async(Arc::new(FlushFlowFunction));
}
}

View File

@@ -77,7 +77,7 @@ mod tests {
use datatypes::vectors::UInt64Vector;
use super::*;
use crate::function::{Function, FunctionContext};
use crate::function::{AsyncFunction, FunctionContext};
macro_rules! define_region_function_test {
($name: ident, $func: ident) => {
@@ -97,8 +97,8 @@ mod tests {
} if valid_types == ConcreteDataType::numerics()));
}
#[test]
fn [<test_ $name _missing_table_mutation>]() {
#[tokio::test]
async fn [<test_ $name _missing_table_mutation>]() {
let f = $func;
let args = vec![99];
@@ -108,15 +108,15 @@ mod tests {
.map(|arg| Arc::new(UInt64Vector::from_slice([arg])) as _)
.collect::<Vec<_>>();
let result = f.eval(FunctionContext::default(), &args).unwrap_err();
let result = f.eval(FunctionContext::default(), &args).await.unwrap_err();
assert_eq!(
"Missing TableMutationHandler, not expected",
result.to_string()
);
}
#[test]
fn [<test_ $name>]() {
#[tokio::test]
async fn [<test_ $name>]() {
let f = $func;
@@ -127,7 +127,7 @@ mod tests {
.map(|arg| Arc::new(UInt64Vector::from_slice([arg])) as _)
.collect::<Vec<_>>();
let result = f.eval(FunctionContext::mock(), &args).unwrap();
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
let expect: VectorRef = Arc::new(UInt64Vector::from_slice([42]));
assert_eq!(expect, result);

View File

@@ -210,7 +210,7 @@ mod tests {
use session::context::QueryContext;
use super::*;
use crate::function::{Function, FunctionContext};
use crate::function::{AsyncFunction, FunctionContext};
macro_rules! define_table_function_test {
($name: ident, $func: ident) => {
@@ -230,8 +230,8 @@ mod tests {
} if valid_types == vec![ConcreteDataType::string_datatype()]));
}
#[test]
fn [<test_ $name _missing_table_mutation>]() {
#[tokio::test]
async fn [<test_ $name _missing_table_mutation>]() {
let f = $func;
let args = vec!["test"];
@@ -241,15 +241,15 @@ mod tests {
.map(|arg| Arc::new(StringVector::from(vec![arg])) as _)
.collect::<Vec<_>>();
let result = f.eval(FunctionContext::default(), &args).unwrap_err();
let result = f.eval(FunctionContext::default(), &args).await.unwrap_err();
assert_eq!(
"Missing TableMutationHandler, not expected",
result.to_string()
);
}
#[test]
fn [<test_ $name>]() {
#[tokio::test]
async fn [<test_ $name>]() {
let f = $func;
@@ -260,7 +260,7 @@ mod tests {
.map(|arg| Arc::new(StringVector::from(vec![arg])) as _)
.collect::<Vec<_>>();
let result = f.eval(FunctionContext::mock(), &args).unwrap();
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
let expect: VectorRef = Arc::new(UInt64Vector::from_slice([42]));
assert_eq!(expect, result);

View File

@@ -123,7 +123,7 @@ mod tests {
use datatypes::vectors::{StringVector, UInt64Vector, VectorRef};
use super::*;
use crate::function::{Function, FunctionContext};
use crate::function::{AsyncFunction, FunctionContext};
#[test]
fn test_migrate_region_misc() {
@@ -140,8 +140,8 @@ mod tests {
} if sigs.len() == 2));
}
#[test]
fn test_missing_procedure_service() {
#[tokio::test]
async fn test_missing_procedure_service() {
let f = MigrateRegionFunction;
let args = vec![1, 1, 1];
@@ -151,15 +151,15 @@ mod tests {
.map(|arg| Arc::new(UInt64Vector::from_slice([arg])) as _)
.collect::<Vec<_>>();
let result = f.eval(FunctionContext::default(), &args).unwrap_err();
let result = f.eval(FunctionContext::default(), &args).await.unwrap_err();
assert_eq!(
"Missing ProcedureServiceHandler, not expected",
result.to_string()
);
}
#[test]
fn test_migrate_region() {
#[tokio::test]
async fn test_migrate_region() {
let f = MigrateRegionFunction;
let args = vec![1, 1, 1];
@@ -169,7 +169,7 @@ mod tests {
.map(|arg| Arc::new(UInt64Vector::from_slice([arg])) as _)
.collect::<Vec<_>>();
let result = f.eval(FunctionContext::mock(), &args).unwrap();
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
assert_eq!(expect, result);

View File

@@ -64,12 +64,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Invalid column proto: {}", err_msg))]
InvalidColumnProto {
err_msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to create vector"))]
CreateVector {
#[snafu(implicit)]
@@ -137,7 +131,6 @@ impl ErrorExt for Error {
Error::DuplicatedTimestampColumn { .. }
| Error::DuplicatedColumnName { .. }
| Error::MissingTimestampColumn { .. } => StatusCode::InvalidArguments,
Error::InvalidColumnProto { .. } => StatusCode::InvalidArguments,
Error::CreateVector { .. } => StatusCode::InvalidArguments,
Error::MissingField { .. } => StatusCode::InvalidArguments,
Error::InvalidColumnDef { source, .. } => source.status_code(),

View File

@@ -187,7 +187,8 @@ fn build_struct(
}
impl crate::function::Function for #name {
#[async_trait::async_trait]
impl crate::function::AsyncFunction for #name {
fn name(&self) -> &'static str {
#display_name
}
@@ -200,7 +201,7 @@ fn build_struct(
#sig_fn()
}
fn eval(&self, func_ctx: crate::function::FunctionContext, columns: &[datatypes::vectors::VectorRef]) -> common_query::error::Result<datatypes::vectors::VectorRef> {
async fn eval(&self, func_ctx: crate::function::FunctionContext, columns: &[datatypes::vectors::VectorRef]) -> common_query::error::Result<datatypes::vectors::VectorRef> {
// Ensure under the `greptime` catalog for security
crate::ensure_greptime!(func_ctx);
@@ -212,51 +213,36 @@ fn build_struct(
};
let columns = Vec::from(columns);
// TODO(dennis): DataFusion doesn't support async UDF currently
std::thread::spawn(move || {
use snafu::OptionExt;
use datatypes::data_type::DataType;
use snafu::OptionExt;
use datatypes::data_type::DataType;
let query_ctx = &func_ctx.query_ctx;
let handler = func_ctx
.state
.#handler
.as_ref()
.context(#snafu_type)?;
let query_ctx = &func_ctx.query_ctx;
let handler = func_ctx
.state
.#handler
.as_ref()
.context(#snafu_type)?;
let mut builder = store_api::storage::ConcreteDataType::#ret()
.create_mutable_vector(rows_num);
let mut builder = store_api::storage::ConcreteDataType::#ret()
.create_mutable_vector(rows_num);
if columns_num == 0 {
let result = common_runtime::block_on_global(async move {
#fn_name(handler, query_ctx, &[]).await
})?;
if columns_num == 0 {
let result = #fn_name(handler, query_ctx, &[]).await?;
builder.push_value_ref(result.as_value_ref());
} else {
for i in 0..rows_num {
let args: Vec<_> = columns.iter()
.map(|vector| vector.get_ref(i))
.collect();
let result = #fn_name(handler, query_ctx, &args).await?;
builder.push_value_ref(result.as_value_ref());
} else {
for i in 0..rows_num {
let args: Vec<_> = columns.iter()
.map(|vector| vector.get_ref(i))
.collect();
let result = common_runtime::block_on_global(async move {
#fn_name(handler, query_ctx, &args).await
})?;
builder.push_value_ref(result.as_value_ref());
}
}
}
Ok(builder.to_vector())
})
.join()
.map_err(|e| {
common_telemetry::error!(e; "Join thread error");
common_query::error::Error::ThreadJoin {
location: snafu::Location::default(),
}
})?
Ok(builder.to_vector())
}
}

View File
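The macro change above drops the `std::thread::spawn` + `common_runtime::block_on_global` detour: since the generated `eval` is now an `async fn` on `AsyncFunction`, the handler can be awaited in place. A simplified, self-contained sketch of the generated shape after the refactor (stand-in handler and row types, assuming `tokio`; only the control flow mirrors the macro output):

```rust
type Result<T> = std::result::Result<T, String>;

// Stand-in for an awaited call to a procedure / table-mutation handler.
async fn call_handler(arg: u64) -> Result<u64> {
    Ok(arg * 2)
}

// Shape of the generated eval after the refactor: per-row handler calls are
// awaited in place instead of inside a spawned thread that blocks on the
// global runtime.
async fn eval(rows: &[u64]) -> Result<Vec<u64>> {
    let mut builder = Vec::with_capacity(rows.len());
    for &row in rows {
        builder.push(call_handler(row).await?);
    }
    Ok(builder)
}

#[tokio::main]
async fn main() -> Result<()> {
    let out = eval(&[1, 2, 3]).await?;
    assert_eq!(out, vec![2, 4, 6]);
    println!("{out:?}");
    Ok(())
}
```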

@@ -76,6 +76,7 @@ pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
/// - `&ProcedureServiceHandlerRef` or `&TableMutationHandlerRef` or `FlowServiceHandlerRef` as the first argument,
/// - `&QueryContextRef` as the second argument, and
/// - `&[ValueRef<'_>]` as the third argument which is SQL function input values in each row.
///
/// Return type must be `common_query::error::Result<Value>`.
///
/// # Example see `common/function/src/system/procedure_state.rs`.

View File

@@ -15,8 +15,8 @@ tempfile = "3.4"
tokio.workspace = true
[target.'cfg(not(windows))'.dependencies]
tikv-jemalloc-ctl = { version = "0.5", features = ["use_std"] }
tikv-jemalloc-ctl = { version = "0.6", features = ["use_std", "stats"] }
[target.'cfg(not(windows))'.dependencies.tikv-jemalloc-sys]
features = ["stats", "profiling", "unprefixed_malloc_on_supported_platforms"]
version = "0.5"
version = "0.6"

View File

@@ -24,7 +24,7 @@ use crate::key::table_info::TableInfoKey;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteKey;
use crate::key::view_info::ViewInfoKey;
use crate::key::MetaKey;
use crate::key::MetadataKey;
/// KvBackend cache invalidator
#[async_trait::async_trait]

View File

@@ -15,12 +15,12 @@
use common_catalog::consts::METRIC_ENGINE;
use common_error::ext::BoxedError;
use common_procedure::error::Error as ProcedureError;
use snafu::{ensure, location, OptionExt};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::metric_engine_consts::LOGICAL_TABLE_METADATA_KEY;
use table::metadata::TableId;
use crate::ddl::DetectingRegion;
use crate::error::{Error, Result, TableNotFoundSnafu, UnsupportedSnafu};
use crate::error::{Error, OperateDatanodeSnafu, Result, TableNotFoundSnafu, UnsupportedSnafu};
use crate::key::table_name::TableNameKey;
use crate::key::TableMetadataManagerRef;
use crate::peer::Peer;
@@ -32,11 +32,9 @@ use crate::ClusterId;
pub fn add_peer_context_if_needed(datanode: Peer) -> impl FnOnce(Error) -> Error {
move |err| {
if !err.is_retry_later() {
return Error::OperateDatanode {
location: location!(),
peer: datanode,
source: BoxedError::new(err),
};
return Err::<(), BoxedError>(BoxedError::new(err))
.context(OperateDatanodeSnafu { peer: datanode })
.unwrap_err();
}
err
}

View File
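The `add_peer_context_if_needed` change above replaces a hand-constructed `Error::OperateDatanode { location: location!(), .. }` with the `ResultExt::context(..).unwrap_err()` idiom, letting snafu capture the implicit location. A minimal sketch of that idiom against a standalone error type (the error name and fields here are illustrative, not the crate's):

```rust
use snafu::{Location, ResultExt, Snafu};

// Illustrative standalone error; the real Error::OperateDatanode carries a
// Peer and a BoxedError source.
#[derive(Debug, Snafu)]
#[snafu(display("Failed to operate datanode {peer_id}"))]
struct OperateDatanodeError {
    peer_id: u64,
    source: std::io::Error,
    #[snafu(implicit)]
    location: Location,
}

fn add_peer_context(peer_id: u64, inner: std::io::Error) -> OperateDatanodeError {
    // Wrap the inner error in an Err, attach the context selector so snafu
    // fills in the implicit location, then take the error back out.
    Err::<(), std::io::Error>(inner)
        .context(OperateDatanodeSnafu { peer_id })
        .unwrap_err()
}

fn main() {
    let io_err = std::io::Error::new(std::io::ErrorKind::Other, "connection reset");
    println!("{}", add_peer_context(42, io_err));
}
```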

@@ -21,7 +21,7 @@ use common_macro::stack_trace_debug;
use common_wal::options::WalOptions;
use serde_json::error::Error as JsonError;
use snafu::{Location, Snafu};
use store_api::storage::{RegionId, RegionNumber};
use store_api::storage::RegionId;
use table::metadata::TableId;
use crate::peer::Peer;
@@ -49,20 +49,6 @@ pub enum Error {
region_id: RegionId,
},
#[snafu(display("Invalid result with a txn response: {}", err_msg))]
InvalidTxnResult {
err_msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid engine type: {}", engine_type))]
InvalidEngineType {
engine_type: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to connect to Etcd"))]
ConnectEtcd {
#[snafu(source)]
@@ -95,15 +81,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Sequence out of range: {}, start={}, step={}", name, start, step))]
SequenceOutOfRange {
name: String,
start: u64,
step: u64,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Unexpected sequence value: {}", err_msg))]
UnexpectedSequenceValue {
err_msg: String,
@@ -327,13 +304,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Catalog already exists, catalog: {}", catalog))]
CatalogAlreadyExists {
catalog: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Schema already exists, catalog:{}, schema: {}", catalog, schema))]
SchemaAlreadyExists {
catalog: String,
@@ -385,15 +355,8 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to rename table, reason: {}", reason))]
RenameTable {
reason: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid table metadata, err: {}", err_msg))]
InvalidTableMetadata {
#[snafu(display("Invalid metadata, err: {}", err_msg))]
InvalidMetadata {
err_msg: String,
#[snafu(implicit)]
location: Location,
@@ -423,27 +386,6 @@ pub enum Error {
location: Location,
},
#[snafu(display(
"Failed to move region {} in table {}, err: {}",
region,
table_id,
err_msg
))]
MoveRegion {
table_id: TableId,
region: RegionNumber,
err_msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid catalog value"))]
InvalidCatalogValue {
source: common_catalog::error::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("External error"))]
External {
#[snafu(implicit)]
@@ -612,13 +554,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Delimiter not found, key: {}", key))]
DelimiterNotFound {
key: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid prefix: {}, key: {}", prefix, key))]
MismatchPrefix {
prefix: String,
@@ -702,15 +637,12 @@ impl ErrorExt for Error {
| ParseOption { .. }
| RouteInfoCorrupted { .. }
| InvalidProtoMsg { .. }
| InvalidTableMetadata { .. }
| MoveRegion { .. }
| InvalidMetadata { .. }
| Unexpected { .. }
| TableInfoNotFound { .. }
| NextSequence { .. }
| SequenceOutOfRange { .. }
| UnexpectedSequenceValue { .. }
| InvalidHeartbeatResponse { .. }
| InvalidTxnResult { .. }
| EncodeJson { .. }
| DecodeJson { .. }
| PayloadNotExist { .. }
@@ -734,22 +666,17 @@ impl ErrorExt for Error {
| MetadataCorruption { .. }
| StrFromUtf8 { .. } => StatusCode::Unexpected,
SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } | RenameTable { .. } => {
StatusCode::Internal
}
SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,
SchemaAlreadyExists { .. } => StatusCode::DatabaseAlreadyExists,
ProcedureNotFound { .. }
| InvalidViewInfo { .. }
| PrimaryKeyNotFound { .. }
| CatalogAlreadyExists { .. }
| EmptyKey { .. }
| InvalidEngineType { .. }
| AlterLogicalTablesInvalidArguments { .. }
| CreateLogicalTablesInvalidArguments { .. }
| MismatchPrefix { .. }
| DelimiterNotFound { .. }
| TlsConfig { .. } => StatusCode::InvalidArguments,
FlowNotFound { .. } => StatusCode::FlowNotFound,
@@ -767,7 +694,6 @@ impl ErrorExt for Error {
OperateDatanode { source, .. } => source.status_code(),
Table { source, .. } => source.status_code(),
RetryLater { source, .. } => source.status_code(),
InvalidCatalogValue { source, .. } => source.status_code(),
ConvertAlterTableRequest { source, .. } => source.status_code(),
ParseProcedureId { .. }

View File

@@ -153,6 +153,9 @@ pub struct UpgradeRegion {
/// it's helpful to verify whether the leader region is ready.
#[serde(with = "humantime_serde")]
pub wait_for_replay_timeout: Option<Duration>,
/// The hint for replaying memtable.
#[serde(default)]
pub location_id: Option<u64>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]

View File

@@ -211,7 +211,7 @@ lazy_static! {
}
/// The key of metadata.
pub trait MetaKey<'a, T> {
pub trait MetadataKey<'a, T> {
fn to_bytes(&self) -> Vec<u8>;
fn from_bytes(bytes: &'a [u8]) -> Result<T>;
@@ -226,7 +226,7 @@ impl From<Vec<u8>> for BytesAdapter {
}
}
impl<'a> MetaKey<'a, BytesAdapter> for BytesAdapter {
impl<'a> MetadataKey<'a, BytesAdapter> for BytesAdapter {
fn to_bytes(&self) -> Vec<u8> {
self.0.clone()
}
@@ -236,7 +236,7 @@ impl<'a> MetaKey<'a, BytesAdapter> for BytesAdapter {
}
}
pub(crate) trait TableMetaKeyGetTxnOp {
pub(crate) trait MetadataKeyGetTxnOp {
fn build_get_op(
&self,
) -> (
@@ -245,7 +245,7 @@ pub(crate) trait TableMetaKeyGetTxnOp {
);
}
pub trait TableMetaValue {
pub trait MetadataValue {
fn try_from_raw_value(raw_value: &[u8]) -> Result<Self>
where
Self: Sized;
@@ -330,7 +330,7 @@ impl<T: DeserializeOwned + Serialize> Serialize for DeserializedValueWithBytes<T
}
}
impl<'de, T: DeserializeOwned + Serialize + TableMetaValue> Deserialize<'de>
impl<'de, T: DeserializeOwned + Serialize + MetadataValue> Deserialize<'de>
for DeserializedValueWithBytes<T>
{
/// - Deserialize behaviors:
@@ -359,7 +359,7 @@ impl<T: Serialize + DeserializeOwned + Clone> Clone for DeserializedValueWithByt
}
}
impl<T: Serialize + DeserializeOwned + TableMetaValue> DeserializedValueWithBytes<T> {
impl<T: Serialize + DeserializeOwned + MetadataValue> DeserializedValueWithBytes<T> {
/// Returns a struct containing a deserialized value and an original `bytes`.
/// It accepts original bytes of inner.
pub fn from_inner_bytes(bytes: Bytes) -> Result<Self> {
@@ -1156,10 +1156,10 @@ impl TableMetadataManager {
}
#[macro_export]
macro_rules! impl_table_meta_value {
macro_rules! impl_metadata_value {
($($val_ty: ty), *) => {
$(
impl $crate::key::TableMetaValue for $val_ty {
impl $crate::key::MetadataValue for $val_ty {
fn try_from_raw_value(raw_value: &[u8]) -> Result<Self> {
serde_json::from_slice(raw_value).context(SerdeJsonSnafu)
}
@@ -1172,10 +1172,10 @@ macro_rules! impl_table_meta_value {
}
}
macro_rules! impl_meta_key_get_txn_op {
macro_rules! impl_metadata_key_get_txn_op {
($($key: ty), *) => {
$(
impl $crate::key::TableMetaKeyGetTxnOp for $key {
impl $crate::key::MetadataKeyGetTxnOp for $key {
/// Returns a [TxnOp] to retrieve the corresponding value
/// and a filter to retrieve the value from the [TxnOpGetResponseSet]
fn build_get_op(
@@ -1197,7 +1197,7 @@ macro_rules! impl_meta_key_get_txn_op {
}
}
impl_meta_key_get_txn_op! {
impl_metadata_key_get_txn_op! {
TableNameKey<'_>,
TableInfoKey,
ViewInfoKey,
@@ -1206,7 +1206,7 @@ impl_meta_key_get_txn_op! {
}
#[macro_export]
macro_rules! impl_optional_meta_value {
macro_rules! impl_optional_metadata_value {
($($val_ty: ty), *) => {
$(
impl $val_ty {
@@ -1222,7 +1222,7 @@ macro_rules! impl_optional_meta_value {
}
}
impl_table_meta_value! {
impl_metadata_value! {
TableNameValue,
TableInfoValue,
ViewInfoValue,
@@ -1233,7 +1233,7 @@ impl_table_meta_value! {
TableFlowValue
}
impl_optional_meta_value! {
impl_optional_metadata_value! {
CatalogNameValue,
SchemaNameValue
}

View File
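The rename from `MetaKey`/`TableMetaValue` to `MetadataKey`/`MetadataValue` above keeps the contract unchanged: a key serializes with `to_bytes` and parses back with `from_bytes`. A small self-contained sketch of a key type written against a simplified stand-in of that trait (the prefix, error handling, and key type are illustrative):

```rust
// Simplified stand-in for common_meta's MetadataKey trait and Result type.
type Result<T> = std::result::Result<T, String>;

trait MetadataKey<'a, T> {
    fn to_bytes(&self) -> Vec<u8>;
    fn from_bytes(bytes: &'a [u8]) -> Result<T>;
}

const PREFIX: &str = "__catalog_name/";

#[derive(Debug, PartialEq)]
struct CatalogKey<'a> {
    catalog: &'a str,
}

impl<'a> MetadataKey<'a, CatalogKey<'a>> for CatalogKey<'a> {
    fn to_bytes(&self) -> Vec<u8> {
        format!("{PREFIX}{}", self.catalog).into_bytes()
    }

    fn from_bytes(bytes: &'a [u8]) -> Result<CatalogKey<'a>> {
        let key = std::str::from_utf8(bytes)
            .map_err(|e| format!("key is not a valid UTF8 string: {e}"))?;
        let catalog = key
            .strip_prefix(PREFIX)
            .ok_or_else(|| format!("illegal key format: '{key}'"))?;
        Ok(CatalogKey { catalog })
    }
}

fn main() -> Result<()> {
    let bytes = CatalogKey { catalog: "greptime" }.to_bytes();
    let parsed = CatalogKey::from_bytes(&bytes)?;
    assert_eq!(parsed.catalog, "greptime");
    println!("round-tripped {parsed:?}");
    Ok(())
}
```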

@@ -20,8 +20,8 @@ use futures::stream::BoxStream;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use crate::error::{self, Error, InvalidTableMetadataSnafu, Result};
use crate::key::{MetaKey, CATALOG_NAME_KEY_PATTERN, CATALOG_NAME_KEY_PREFIX};
use crate::error::{self, Error, InvalidMetadataSnafu, Result};
use crate::key::{MetadataKey, CATALOG_NAME_KEY_PATTERN, CATALOG_NAME_KEY_PREFIX};
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
use crate::rpc::store::RangeRequest;
@@ -56,14 +56,14 @@ impl<'a> CatalogNameKey<'a> {
}
}
impl<'a> MetaKey<'a, CatalogNameKey<'a>> for CatalogNameKey<'_> {
impl<'a> MetadataKey<'a, CatalogNameKey<'a>> for CatalogNameKey<'_> {
fn to_bytes(&self) -> Vec<u8> {
self.to_string().into_bytes()
}
fn from_bytes(bytes: &'a [u8]) -> Result<CatalogNameKey<'a>> {
let key = std::str::from_utf8(bytes).map_err(|e| {
InvalidTableMetadataSnafu {
InvalidMetadataSnafu {
err_msg: format!(
"CatalogNameKey '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -87,7 +87,7 @@ impl<'a> TryFrom<&'a str> for CatalogNameKey<'a> {
fn try_from(s: &'a str) -> Result<Self> {
let captures = CATALOG_NAME_KEY_PATTERN
.captures(s)
.context(InvalidTableMetadataSnafu {
.context(InvalidMetadataSnafu {
err_msg: format!("Illegal CatalogNameKey format: '{s}'"),
})?;

View File

@@ -22,10 +22,10 @@ use snafu::OptionExt;
use store_api::storage::RegionNumber;
use table::metadata::TableId;
use super::MetaKey;
use crate::error::{InvalidTableMetadataSnafu, Result};
use super::MetadataKey;
use crate::error::{InvalidMetadataSnafu, Result};
use crate::key::{
RegionDistribution, TableMetaValue, DATANODE_TABLE_KEY_PATTERN, DATANODE_TABLE_KEY_PREFIX,
MetadataValue, RegionDistribution, DATANODE_TABLE_KEY_PATTERN, DATANODE_TABLE_KEY_PREFIX,
};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
@@ -77,14 +77,14 @@ impl DatanodeTableKey {
}
}
impl<'a> MetaKey<'a, DatanodeTableKey> for DatanodeTableKey {
impl<'a> MetadataKey<'a, DatanodeTableKey> for DatanodeTableKey {
fn to_bytes(&self) -> Vec<u8> {
self.to_string().into_bytes()
}
fn from_bytes(bytes: &[u8]) -> Result<DatanodeTableKey> {
let key = std::str::from_utf8(bytes).map_err(|e| {
InvalidTableMetadataSnafu {
InvalidMetadataSnafu {
err_msg: format!(
"DatanodeTableKey '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -92,12 +92,11 @@ impl<'a> MetaKey<'a, DatanodeTableKey> for DatanodeTableKey {
}
.build()
})?;
let captures =
DATANODE_TABLE_KEY_PATTERN
.captures(key)
.context(InvalidTableMetadataSnafu {
err_msg: format!("Invalid DatanodeTableKey '{key}'"),
})?;
let captures = DATANODE_TABLE_KEY_PATTERN
.captures(key)
.context(InvalidMetadataSnafu {
err_msg: format!("Invalid DatanodeTableKey '{key}'"),
})?;
// Safety: pass the regex check above
let datanode_id = captures[1].parse::<DatanodeId>().unwrap();
let table_id = captures[2].parse::<TableId>().unwrap();

View File

@@ -38,7 +38,7 @@ use crate::key::flow::flow_name::FlowNameManager;
use crate::key::flow::flownode_flow::FlownodeFlowManager;
pub use crate::key::flow::table_flow::{TableFlowManager, TableFlowManagerRef};
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{FlowId, MetaKey};
use crate::key::{FlowId, MetadataKey};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchDeleteRequest;
@@ -66,7 +66,7 @@ impl<T> FlowScoped<T> {
}
}
impl<'a, T: MetaKey<'a, T>> MetaKey<'a, FlowScoped<T>> for FlowScoped<T> {
impl<'a, T: MetadataKey<'a, T>> MetadataKey<'a, FlowScoped<T>> for FlowScoped<T> {
fn to_bytes(&self) -> Vec<u8> {
let prefix = FlowScoped::<T>::PREFIX.as_bytes();
let inner = self.inner.to_bytes();
@@ -295,7 +295,7 @@ mod tests {
inner: Vec<u8>,
}
impl<'a> MetaKey<'a, MockKey> for MockKey {
impl<'a> MetadataKey<'a, MockKey> for MockKey {
fn to_bytes(&self) -> Vec<u8> {
self.inner.clone()
}

View File

@@ -25,7 +25,7 @@ use table::table_name::TableName;
use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId, MetaKey, TableMetaValue};
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::FlownodeId;
@@ -42,7 +42,7 @@ lazy_static! {
/// The layout: `__flow/info/{flow_id}`.
pub struct FlowInfoKey(FlowScoped<FlowInfoKeyInner>);
impl<'a> MetaKey<'a, FlowInfoKey> for FlowInfoKey {
impl<'a> MetadataKey<'a, FlowInfoKey> for FlowInfoKey {
fn to_bytes(&self) -> Vec<u8> {
self.0.to_bytes()
}
@@ -80,14 +80,14 @@ impl FlowInfoKeyInner {
}
}
impl<'a> MetaKey<'a, FlowInfoKeyInner> for FlowInfoKeyInner {
impl<'a> MetadataKey<'a, FlowInfoKeyInner> for FlowInfoKeyInner {
fn to_bytes(&self) -> Vec<u8> {
format!("{FLOW_INFO_KEY_PREFIX}/{}", self.flow_id).into_bytes()
}
fn from_bytes(bytes: &'a [u8]) -> Result<FlowInfoKeyInner> {
let key = std::str::from_utf8(bytes).map_err(|e| {
error::InvalidTableMetadataSnafu {
error::InvalidMetadataSnafu {
err_msg: format!(
"FlowInfoKeyInner '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -98,7 +98,7 @@ impl<'a> MetaKey<'a, FlowInfoKeyInner> for FlowInfoKeyInner {
let captures =
FLOW_INFO_KEY_PATTERN
.captures(key)
.context(error::InvalidTableMetadataSnafu {
.context(error::InvalidMetadataSnafu {
err_msg: format!("Invalid FlowInfoKeyInner '{key}'"),
})?;
// Safety: pass the regex check above

View File

@@ -24,7 +24,7 @@ use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{
BytesAdapter, DeserializedValueWithBytes, FlowId, MetaKey, TableMetaValue, NAME_PATTERN,
BytesAdapter, DeserializedValueWithBytes, FlowId, MetadataKey, MetadataValue, NAME_PATTERN,
};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
@@ -76,7 +76,7 @@ impl<'a> FlowNameKey<'a> {
}
}
impl<'a> MetaKey<'a, FlowNameKey<'a>> for FlowNameKey<'a> {
impl<'a> MetadataKey<'a, FlowNameKey<'a>> for FlowNameKey<'a> {
fn to_bytes(&self) -> Vec<u8> {
self.0.to_bytes()
}
@@ -95,7 +95,7 @@ pub struct FlowNameKeyInner<'a> {
pub flow_name: &'a str,
}
impl<'a> MetaKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
impl<'a> MetadataKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
fn to_bytes(&self) -> Vec<u8> {
format!(
"{FLOW_NAME_KEY_PREFIX}/{}/{}",
@@ -106,7 +106,7 @@ impl<'a> MetaKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
fn from_bytes(bytes: &'a [u8]) -> Result<FlowNameKeyInner> {
let key = std::str::from_utf8(bytes).map_err(|e| {
error::InvalidTableMetadataSnafu {
error::InvalidMetadataSnafu {
err_msg: format!(
"FlowNameKeyInner '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -117,7 +117,7 @@ impl<'a> MetaKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
let captures =
FLOW_NAME_KEY_PATTERN
.captures(key)
.context(error::InvalidTableMetadataSnafu {
.context(error::InvalidMetadataSnafu {
err_msg: format!("Invalid FlowNameKeyInner '{key}'"),
})?;
// Safety: pass the regex check above

View File

@@ -22,7 +22,7 @@ use snafu::OptionExt;
use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetaKey, TableMetaValue};
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::peer::Peer;
@@ -68,7 +68,7 @@ impl FlowRouteKey {
}
}
impl<'a> MetaKey<'a, FlowRouteKey> for FlowRouteKey {
impl<'a> MetadataKey<'a, FlowRouteKey> for FlowRouteKey {
fn to_bytes(&self) -> Vec<u8> {
self.0.to_bytes()
}
@@ -101,7 +101,7 @@ impl FlowRouteKeyInner {
}
}
impl<'a> MetaKey<'a, FlowRouteKeyInner> for FlowRouteKeyInner {
impl<'a> MetadataKey<'a, FlowRouteKeyInner> for FlowRouteKeyInner {
fn to_bytes(&self) -> Vec<u8> {
format!(
"{FLOW_ROUTE_KEY_PREFIX}/{}/{}",
@@ -112,7 +112,7 @@ impl<'a> MetaKey<'a, FlowRouteKeyInner> for FlowRouteKeyInner {
fn from_bytes(bytes: &'a [u8]) -> Result<FlowRouteKeyInner> {
let key = std::str::from_utf8(bytes).map_err(|e| {
error::InvalidTableMetadataSnafu {
error::InvalidMetadataSnafu {
err_msg: format!(
"FlowInfoKeyInner '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -123,7 +123,7 @@ impl<'a> MetaKey<'a, FlowRouteKeyInner> for FlowRouteKeyInner {
let captures =
FLOW_ROUTE_KEY_PATTERN
.captures(key)
.context(error::InvalidTableMetadataSnafu {
.context(error::InvalidMetadataSnafu {
err_msg: format!("Invalid FlowInfoKeyInner '{key}'"),
})?;
// Safety: pass the regex check above
@@ -209,7 +209,7 @@ impl FlowRouteManager {
#[cfg(test)]
mod tests {
use super::FlowRouteKey;
use crate::key::MetaKey;
use crate::key::MetadataKey;
#[test]
fn test_key_serialization() {

View File

@@ -22,7 +22,7 @@ use snafu::OptionExt;
use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetaKey};
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
@@ -44,7 +44,7 @@ const FLOWNODE_FLOW_KEY_PREFIX: &str = "flownode";
/// The layout `__flow/flownode/{flownode_id}/{flow_id}/{partition_id}`
pub struct FlownodeFlowKey(FlowScoped<FlownodeFlowKeyInner>);
impl<'a> MetaKey<'a, FlownodeFlowKey> for FlownodeFlowKey {
impl<'a> MetadataKey<'a, FlownodeFlowKey> for FlownodeFlowKey {
fn to_bytes(&self) -> Vec<u8> {
self.0.to_bytes()
}
@@ -113,7 +113,7 @@ impl FlownodeFlowKeyInner {
}
}
impl<'a> MetaKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner {
impl<'a> MetadataKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner {
fn to_bytes(&self) -> Vec<u8> {
format!(
"{FLOWNODE_FLOW_KEY_PREFIX}/{}/{}/{}",
@@ -124,7 +124,7 @@ impl<'a> MetaKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner {
fn from_bytes(bytes: &'a [u8]) -> Result<FlownodeFlowKeyInner> {
let key = std::str::from_utf8(bytes).map_err(|e| {
error::InvalidTableMetadataSnafu {
error::InvalidMetadataSnafu {
err_msg: format!(
"FlownodeFlowKeyInner '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -135,7 +135,7 @@ impl<'a> MetaKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner {
let captures =
FLOWNODE_FLOW_KEY_PATTERN
.captures(key)
.context(error::InvalidTableMetadataSnafu {
.context(error::InvalidMetadataSnafu {
err_msg: format!("Invalid FlownodeFlowKeyInner '{key}'"),
})?;
// Safety: pass the regex check above
@@ -208,7 +208,7 @@ impl FlownodeFlowManager {
#[cfg(test)]
mod tests {
use crate::key::flow::flownode_flow::FlownodeFlowKey;
use crate::key::MetaKey;
use crate::key::MetadataKey;
#[test]
fn test_key_serialization() {

View File

@@ -23,7 +23,7 @@ use table::metadata::TableId;
use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetaKey, TableMetaValue};
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::peer::Peer;
@@ -56,7 +56,7 @@ struct TableFlowKeyInner {
#[derive(Debug, PartialEq)]
pub struct TableFlowKey(FlowScoped<TableFlowKeyInner>);
impl<'a> MetaKey<'a, TableFlowKey> for TableFlowKey {
impl<'a> MetadataKey<'a, TableFlowKey> for TableFlowKey {
fn to_bytes(&self) -> Vec<u8> {
self.0.to_bytes()
}
@@ -129,7 +129,7 @@ impl TableFlowKeyInner {
}
}
impl<'a> MetaKey<'a, TableFlowKeyInner> for TableFlowKeyInner {
impl<'a> MetadataKey<'a, TableFlowKeyInner> for TableFlowKeyInner {
fn to_bytes(&self) -> Vec<u8> {
format!(
"{TABLE_FLOW_KEY_PREFIX}/{}/{}/{}/{}",
@@ -140,7 +140,7 @@ impl<'a> MetaKey<'a, TableFlowKeyInner> for TableFlowKeyInner {
fn from_bytes(bytes: &'a [u8]) -> Result<TableFlowKeyInner> {
let key = std::str::from_utf8(bytes).map_err(|e| {
error::InvalidTableMetadataSnafu {
error::InvalidMetadataSnafu {
err_msg: format!(
"TableFlowKeyInner '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -151,7 +151,7 @@ impl<'a> MetaKey<'a, TableFlowKeyInner> for TableFlowKeyInner {
let captures =
TABLE_FLOW_KEY_PATTERN
.captures(key)
.context(error::InvalidTableMetadataSnafu {
.context(error::InvalidMetadataSnafu {
err_msg: format!("Invalid TableFlowKeyInner '{key}'"),
})?;
// Safety: pass the regex check above

View File

@@ -23,8 +23,8 @@ use humantime_serde::re::humantime;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use crate::error::{self, Error, InvalidTableMetadataSnafu, ParseOptionSnafu, Result};
use crate::key::{MetaKey, SCHEMA_NAME_KEY_PATTERN, SCHEMA_NAME_KEY_PREFIX};
use crate::error::{self, Error, InvalidMetadataSnafu, ParseOptionSnafu, Result};
use crate::key::{MetadataKey, SCHEMA_NAME_KEY_PATTERN, SCHEMA_NAME_KEY_PREFIX};
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
use crate::rpc::store::RangeRequest;
@@ -89,6 +89,19 @@ impl TryFrom<&HashMap<String, String>> for SchemaNameValue {
}
}
impl From<SchemaNameValue> for HashMap<String, String> {
fn from(value: SchemaNameValue) -> Self {
let mut opts = HashMap::new();
if let Some(ttl) = value.ttl {
opts.insert(
OPT_KEY_TTL.to_string(),
format!("{}", humantime::format_duration(ttl)),
);
}
opts
}
}
impl<'a> SchemaNameKey<'a> {
pub fn new(catalog: &'a str, schema: &'a str) -> Self {
Self { catalog, schema }
@@ -109,14 +122,14 @@ impl Display for SchemaNameKey<'_> {
}
}
impl<'a> MetaKey<'a, SchemaNameKey<'a>> for SchemaNameKey<'_> {
impl<'a> MetadataKey<'a, SchemaNameKey<'a>> for SchemaNameKey<'_> {
fn to_bytes(&self) -> Vec<u8> {
self.to_string().into_bytes()
}
fn from_bytes(bytes: &'a [u8]) -> Result<SchemaNameKey<'a>> {
let key = std::str::from_utf8(bytes).map_err(|e| {
InvalidTableMetadataSnafu {
InvalidMetadataSnafu {
err_msg: format!(
"SchemaNameKey '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -142,7 +155,7 @@ impl<'a> TryFrom<&'a str> for SchemaNameKey<'a> {
fn try_from(s: &'a str) -> Result<Self> {
let captures = SCHEMA_NAME_KEY_PATTERN
.captures(s)
.context(InvalidTableMetadataSnafu {
.context(InvalidMetadataSnafu {
err_msg: format!("Illegal SchemaNameKey format: '{s}'"),
})?;

View File
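The new `From<SchemaNameValue> for HashMap<String, String>` impl above serializes the schema TTL back into its option string form via `humantime`. A standalone sketch of that conversion with a simplified struct, assuming the `humantime` crate; the literal "ttl" key stands in for the real option-key constant:

```rust
use std::collections::HashMap;
use std::time::Duration;

// Simplified stand-in for SchemaNameValue; the real type is serde-serializable.
#[derive(Default)]
struct SchemaNameValue {
    ttl: Option<Duration>,
}

impl From<SchemaNameValue> for HashMap<String, String> {
    fn from(value: SchemaNameValue) -> Self {
        let mut opts = HashMap::new();
        if let Some(ttl) = value.ttl {
            // humantime renders a Duration as e.g. "1day" or "2h 30m".
            opts.insert("ttl".to_string(), humantime::format_duration(ttl).to_string());
        }
        opts
    }
}

fn main() {
    let value = SchemaNameValue {
        ttl: Some(Duration::from_secs(60 * 60 * 24)),
    };
    let opts: HashMap<String, String> = value.into();
    println!("{opts:?}"); // {"ttl": "1day"}
}
```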

@@ -23,9 +23,9 @@ use table::table_name::TableName;
use table::table_reference::TableReference;
use super::TABLE_INFO_KEY_PATTERN;
use crate::error::{InvalidTableMetadataSnafu, Result};
use crate::error::{InvalidMetadataSnafu, Result};
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{DeserializedValueWithBytes, MetaKey, TableMetaValue, TABLE_INFO_KEY_PREFIX};
use crate::key::{DeserializedValueWithBytes, MetadataKey, MetadataValue, TABLE_INFO_KEY_PREFIX};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchGetRequest;
@@ -51,14 +51,14 @@ impl Display for TableInfoKey {
}
}
impl<'a> MetaKey<'a, TableInfoKey> for TableInfoKey {
impl<'a> MetadataKey<'a, TableInfoKey> for TableInfoKey {
fn to_bytes(&self) -> Vec<u8> {
self.to_string().into_bytes()
}
fn from_bytes(bytes: &[u8]) -> Result<TableInfoKey> {
let key = std::str::from_utf8(bytes).map_err(|e| {
InvalidTableMetadataSnafu {
InvalidMetadataSnafu {
err_msg: format!(
"TableInfoKey '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -68,7 +68,7 @@ impl<'a> MetaKey<'a, TableInfoKey> for TableInfoKey {
})?;
let captures = TABLE_INFO_KEY_PATTERN
.captures(key)
.context(InvalidTableMetadataSnafu {
.context(InvalidMetadataSnafu {
err_msg: format!("Invalid TableInfoKey '{key}'"),
})?;
// Safety: pass the regex check above

View File

@@ -22,8 +22,8 @@ use snafu::OptionExt;
use table::metadata::TableId;
use table::table_name::TableName;
use super::{MetaKey, TableMetaValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
use crate::error::{Error, InvalidTableMetadataSnafu, Result};
use super::{MetadataKey, MetadataValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
use crate::error::{Error, InvalidMetadataSnafu, Result};
use crate::kv_backend::memory::MemoryKvBackend;
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
@@ -63,14 +63,14 @@ impl Display for TableNameKey<'_> {
}
}
impl<'a> MetaKey<'a, TableNameKey<'a>> for TableNameKey<'_> {
impl<'a> MetadataKey<'a, TableNameKey<'a>> for TableNameKey<'_> {
fn to_bytes(&self) -> Vec<u8> {
self.to_string().into_bytes()
}
fn from_bytes(bytes: &'a [u8]) -> Result<TableNameKey<'a>> {
let key = std::str::from_utf8(bytes).map_err(|e| {
InvalidTableMetadataSnafu {
InvalidMetadataSnafu {
err_msg: format!(
"TableNameKey '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -80,7 +80,7 @@ impl<'a> MetaKey<'a, TableNameKey<'a>> for TableNameKey<'_> {
})?;
let captures = TABLE_NAME_KEY_PATTERN
.captures(key)
.context(InvalidTableMetadataSnafu {
.context(InvalidMetadataSnafu {
err_msg: format!("Invalid TableNameKey '{key}'"),
})?;
let catalog = captures.get(1).unwrap().as_str();
@@ -128,7 +128,7 @@ impl<'a> TryFrom<&'a str> for TableNameKey<'a> {
fn try_from(s: &'a str) -> Result<Self> {
let captures = TABLE_NAME_KEY_PATTERN
.captures(s)
.context(InvalidTableMetadataSnafu {
.context(InvalidMetadataSnafu {
err_msg: format!("Illegal TableNameKey format: '{s}'"),
})?;
// Safety: pass the regex check above

Some files were not shown because too many files have changed in this diff.