Compare commits

...

41 Commits

Author SHA1 Message Date
JohnsonLee
a8477e4142 fix: table resolving logic related to pg_catalog (#4580)
* fix: table resolving logic related to pg_catalog

refer to
https://github.com/GreptimeTeam/greptimedb/issues/3560#issuecomment-2287794348
and #4543

* refactor: remove CatalogProtocol type

* fix: sqlness

* fix: forbid creating database pg_catalog with the MySQL client

* refactor: use QueryContext as arguments rather than Channel

* refactor: pass None as default behaviour in information_schema

* test: fix test
2024-09-09 00:47:59 +00:00
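In short, pg_catalog is now resolved only for connections that arrive over the Postgres protocol. A minimal sketch of the channel gate, mirroring the map_or(Channel::Unknown, ...) pattern in the KvBackendCatalogManager diff at the bottom of this compare (the helper name is hypothetical):

use session::context::{Channel, QueryContext};

// pg_catalog is only visible to Postgres clients; MySQL/HTTP clients fall through
// to the regular schema resolution path.
fn pg_catalog_visible(query_ctx: Option<&QueryContext>) -> bool {
    let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
    channel == Channel::Postgres
}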
Yiran
b950e705f5 chore: update the document link in README.md (#4690) 2024-09-07 15:27:32 +00:00
Ruihang Xia
d2d62e0c6f fix: unconditional statistics (#4694)
* fix: unconditional statistics

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add more sqlness case

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-07 04:28:11 +00:00
localhost
5d9f8a3be7 feat: add test pipeline api (#4667)
* chore: add test pipeline api

* chore: add test for test pipeline api

* chore: fix taplo check

* chore: change pipeline dryrun api path

* chore: add more info for pipeline dryrun api
2024-09-06 08:36:49 +00:00
jeremyhi
e88465840d feat: add extension field to HeartbeatRequest (#4688)
* feat: add extension field to HeartbeatRequest

* chore: extension to extensions

* chore: upgrade proto
2024-09-06 08:29:20 +00:00
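The new field is an opaque key-to-bytes map; the RegionResponse diff further down shows the matching extension -> extensions rename with the type HashMap<String, Vec<u8>>. A minimal sketch of stashing and reading a value in such a map (the key name here is made up for illustration):

use std::collections::HashMap;

fn main() {
    // Assumed shape: a string-keyed byte map, as in RegionResponse::extensions.
    let mut extensions: HashMap<String, Vec<u8>> = HashMap::new();
    extensions.insert("node_role".to_string(), b"datanode".to_vec());

    if let Some(raw) = extensions.get("node_role") {
        println!("node_role = {}", String::from_utf8_lossy(raw));
    }
}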
localhost
67d95d2088 refactor!: add processor builder and transform builder (#4571)
* chore: add processor builder and transform builder

* chore: in process

* chore: intermediate state from hashmap to vector in pipeline

* chore: remove useless code and rename some struct

* chore: fix typos

* chore: format code

* chore: add error handling and optimize code readability

* chore: fix typos

* chore: remove useless code

* chore: add some doc

* chore: fix by pr commit

* chore: remove useless code and change struct name

* chore: modify the location of the find_key_index function.
2024-09-06 07:51:08 +00:00
Yingwen
506dc20765 fix: last non null iter not init (#4687) 2024-09-06 04:13:23 +00:00
Lei, HUANG
114772ba87 chore: bump version v0.9.3 (#4684) 2024-09-06 02:31:41 +00:00
liyang
89a3da8a3a chore(dockerfile): remove mysql and postgresql clients in greptimedb image (#4685) 2024-09-05 16:00:53 +00:00
jeremyhi
8814695b58 feat: invalidate cache via invalidator on region migration (#4682)
feat: invalidate table via invalidator on region migration
2024-09-05 06:15:38 +00:00
Lanqing Yang
86cef648cd feat: add more spans to mito engine (#4643)
feat: add more spans to mito engine
2024-09-05 06:13:22 +00:00
Ning Sun
e476e36647 feat: add geohash and h3 as built-in functions (#4656)
* feat: add built-in functions h3 and geohash

* tests: add sqlness tests for geo functions

* doc: correct h3 comment

* fix: lint error

* fix: toml format

* refactor: address review comments

* test: add more sqlness cases

* Apply suggestions from code review

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-05 04:42:29 +00:00
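The new built-ins wrap the geohash and h3o crates that this changeset adds to Cargo.lock. A rough sketch of the underlying crate calls, assuming the geohash 0.13 / h3o 0.6 APIs pinned in the lockfile (this is not the GreptimeDB SQL function interface):

use geo_types::Coord;
use h3o::{LatLng, Resolution};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Geohash string for a point at 12 characters of precision (x = longitude, y = latitude).
    let hash = geohash::encode(Coord { x: -122.41, y: 37.77 }, 12)?;
    println!("geohash: {hash}");

    // H3 cell index for the same point at resolution 8.
    let cell = LatLng::new(37.77, -122.41)?.to_cell(Resolution::Eight);
    println!("h3 cell: {cell}");

    Ok(())
}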
shuiyisong
4781b327f3 fix: ref to auth err (#4681)
* fix: ref to auth err

* fix: typo
2024-09-05 04:05:39 +00:00
LFC
3e4a69017d build: add mysql and postgresql clients to greptimedb image (#4677) 2024-09-04 11:38:47 +00:00
LFC
d43e31c7ed feat: schedule compaction when adding sst files by editing region (#4648)
* feat: schedule compaction when adding sst files by editing region

* add minimum time interval for two successive compactions

* resolve PR comments
2024-09-04 10:10:07 +00:00
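The second bullet corresponds to the new region_engine.mito.min_compaction_interval option documented later in this diff (default "0m", i.e. no throttling). A hypothetical sketch of such an interval gate, not the actual mito2 scheduler:

use std::time::{Duration, Instant};

// Returns true when a newly requested compaction is allowed to run.
fn should_schedule_compaction(last_run: Option<Instant>, min_interval: Duration) -> bool {
    if min_interval.is_zero() {
        // Default `min_compaction_interval = "0m"`: keep the old, unthrottled behaviour.
        return true;
    }
    match last_run {
        None => true,
        Some(at) => at.elapsed() >= min_interval,
    }
}

fn main() {
    let min_interval = Duration::from_secs(30 * 60);
    println!(
        "schedule now? {}",
        should_schedule_compaction(Some(Instant::now()), min_interval)
    );
}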
discord9
19e2a9d44b feat: change log level dynamically (#4653)
* feat: add dyn_log handle

* feat: use reload handle

* chore: per review
2024-09-04 07:54:50 +00:00
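tracing-subscriber's reload layer is the usual building block for this; a minimal sketch under the assumption that the new dyn_log handle wraps something like the following (not the actual GreptimeDB handler):

use tracing_subscriber::{filter::LevelFilter, prelude::*, reload};

fn main() {
    // Wrap the level filter in a reload layer and keep the handle for later.
    let (filter, handle) = reload::Layer::new(LevelFilter::INFO);
    tracing_subscriber::registry()
        .with(filter)
        .with(tracing_subscriber::fmt::layer())
        .init();

    tracing::debug!("dropped while the filter is INFO");

    // Later (e.g. from an admin HTTP handler), raise verbosity without a restart.
    handle
        .reload(LevelFilter::DEBUG)
        .expect("reload log filter");
    tracing::debug!("now visible at DEBUG");
}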
zyy17
8453df1392 refactor: make init_global_logging() clean and add log_format (#4657)
refactor: refine the code logic of init_global_logging and add json output format
2024-09-04 03:04:51 +00:00
Ruihang Xia
8ca35a4a1a fix: use number of partitions as parallelism in region scanner (#4669)
* fix: use number of partitions as parallelism in region scanner

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add sqlness

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Co-authored-by: Lei HUANG <mrsatangel@gmail.com>

* order by ts

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* debug print time range

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Lei HUANG <mrsatangel@gmail.com>
2024-09-03 13:42:38 +00:00
Ruihang Xia
93f202694c refactor: remove unused error variants (#4666)
* add python script

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove unused errors

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix all negative cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* setup CI

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add license header

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-03 13:19:38 +00:00
Weny Xu
b52e3c694a chore(ci): set etcd resources limits (#4665) 2024-09-03 07:25:23 +00:00
dennis zhuang
a612b67470 feat: supports name in object storage config (#4630)
* feat: supports name in object storage config

* fix: integration test

* fix: integration test

* fix: update sample config

* fix: config api test
2024-09-03 07:02:55 +00:00
jeremyhi
9b03940e03 chore: refactor metadata key value trait (#4664) 2024-09-03 07:00:24 +00:00
jeremyhi
8d6cd8ae16 feat: export import database (#4654)
* feat: export database create sql

* feat: import create database

* Update src/cmd/src/cli/export.rs

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* Update src/cmd/src/cli/import.rs

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* Update src/cmd/src/error.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* chore: make show create fail fast

---------

Co-authored-by: Weny Xu <wenymedia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-03 04:45:25 +00:00
dennis zhuang
8f4ec536de feat: grpc writing supports TTL hint (#4651) 2024-09-03 02:15:01 +00:00
zyy17
f0e2d6e663 fix: use 'target' for 'actions-rust-lang/setup-rust-toolchain' to fix cross build failed (#4661) 2024-09-02 06:11:12 +00:00
Weny Xu
306bd25c64 fix: expose missing options for initializing regions (#4660)
* fix: expose `init_regions_in_background` and `init_regions_parallelism` opts

* fix: ci
2024-09-02 03:11:18 +00:00
zyy17
ddafcc678c ci: disable macos integration test and some minor refactoring (#4658) 2024-09-02 03:06:17 +00:00
Weny Xu
2564b5daee fix: correct otlp endpoint formatting (#4646) 2024-09-02 02:59:50 +00:00
Lei, HUANG
37dcf34bb9 fix(mito): avoid caching empty batches in row group (#4652)
* fix: avoid caching empty batches in row group

* fix: clippy

* Update tests/cases/standalone/common/select/last_value.sql

* fix: sqlness
2024-09-02 02:43:00 +00:00
Yingwen
8eda36bfe3 feat: remove files from the write cache in purger (#4655)
* feat: remove files from the write cache in purger

* chore: fix typo
2024-08-31 04:19:52 +00:00
Ruihang Xia
68b59e0e5e feat: remove the requirement that partition column must be PK (#4647)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-31 03:16:01 +00:00
Ruihang Xia
a37aeb2814 feat: initialize partition range from ScanInput (#4635)
* feat: initialize partition range from ScanInput

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use num_rows instead

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add todo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* setup unordered scan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/mito2/src/read/scan_region.rs

Co-authored-by: jeremyhi <jiachun_feng@proton.me>

* leave unordered scan unchanged

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: jeremyhi <jiachun_feng@proton.me>
2024-08-30 07:30:37 +00:00
jeremyhi
f641c562c2 feat: show create database (#4642)
* feat: show create database

* feat: add sqlness test

* chore: reorder mod and use

* feat: show create schema

* Update src/frontend/src/instance.rs
2024-08-30 03:58:11 +00:00
Lanqing Yang
9286e963e7 chore: adding heartbeat sent/recv counts in greptimedb nodes (#4624)
obs: adding heartbeat sent/recv counts in greptimedb nodes
2024-08-30 03:57:16 +00:00
LFC
8ea4f67e4b refactor: reduce a object store "stat" call (#4645) 2024-08-30 03:31:19 +00:00
jeremyhi
5e4bac2633 feat: import cli tool (#4639)
* feat: import create tables

* feat: import database

* fix: export view schema
2024-08-29 09:32:21 +00:00
LFC
d45b04180c feat: pre-download the ingested sst (#4636)
* refactor: pre-read the ingested SST file in the object store to fill the local cache and accelerate the first query

* feat: pre-download the ingested SST from remote to accelerate following reads

* resolve PR comments

* resolve PR comments
2024-08-29 08:36:41 +00:00
discord9
8c8499ce53 perf(flow): Map&Reduce Operator use batch to reduce alloc (#4567)
* feat: partial impl mfp

* feat: eval batch inner

* chore: fmt

* feat: mfp eval_batch

* WIP

* feat: Collection generic over row & Batch

* feat: render source batch

* chore: chore

* feat: render mfp batch

* feat: render reduce batch(WIP)

* feat(WIP): render reduce

* feat: reduce batch

* feat: render sink batch

* feat: render constant batch

* chore: error handling & mfp batch test

* test: mfp batch

* chore: rm import

* test: render reduce batch

* chore: add TODO

* chore: per bot review

* refactor: per review

* chore: cmt

* chore: rename

* docs: update no panic
2024-08-29 07:28:13 +00:00
Weny Xu
79f40a762b fix: set selector_result_cache_size in unit test again (#4641) 2024-08-29 07:14:40 +00:00
jeremyhi
b062d8515d feat: copy database ignores view and temporary tables (#4640)
feat: copy database ignores view and temporary tables
2024-08-29 06:17:51 +00:00
discord9
9f9c1dab60 feat(flow): use DataFusion's optimizer (#4489)
* feat: use datafusion optimization

refactor: mv `sql_to_flow_plan` elsewhere

feat(WIP): use df optimization

WIP analyzer rule

feat(WIP): avg expander

fix: transform avg expander

fix: avg expand

feat: names from substrait

fix: avg rewrite

test: update `test_avg`&`test_avg_group_by`

test: fix `test_sum`

test: fix some tests

chore: remove unused flow plan transform

feat: tumble expander

test: update tests

* chore: clippy

* fix: tumble loses `group expr`

* test: sqlness test update

* test: rm unused cast

* test: simplify sqlness

* refactor: per review

* chore: after rebase

* fix: remove an outdated test

* test: add comment

* fix: report error when not literal

* chore: update sqlness test after rebase

* refactor: per review
2024-08-29 02:52:00 +00:00
258 changed files with 8893 additions and 4937 deletions

View File

@@ -42,7 +42,7 @@ runs:
- name: Install rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
targets: ${{ inputs.arch }}
target: ${{ inputs.arch }}
- name: Start etcd # For integration tests.
if: ${{ inputs.disable-run-tests == 'false' }}

View File

@@ -27,7 +27,7 @@ runs:
- name: Install rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
targets: ${{ inputs.arch }}
target: ${{ inputs.arch }}
components: llvm-tools-preview
- name: Rust Cache

View File

@@ -18,6 +18,8 @@ runs:
--set replicaCount=${{ inputs.etcd-replicas }} \
--set resources.requests.cpu=50m \
--set resources.requests.memory=128Mi \
--set resources.limits.cpu=1000m \
--set resources.limits.memory=2Gi \
--set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \
--set persistence.size=2Gi \

View File

@@ -38,7 +38,7 @@ runs:
steps:
- name: Configure AWS credentials
if: startsWith(inputs.runner, 'ec2')
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

View File

@@ -25,7 +25,7 @@ runs:
steps:
- name: Configure AWS credentials
if: ${{ inputs.label && inputs.ec2-instance-id }}
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

View File

@@ -616,8 +616,8 @@ jobs:
with:
# Shares across multiple jobs
shared-key: "check-rust-fmt"
- name: Run cargo fmt
run: cargo fmt --all -- --check
- name: Check format
run: make fmt-check
clippy:
name: Clippy

View File

@@ -33,6 +33,7 @@ on:
description: The runner uses to build linux-arm64 artifacts
default: ec2-c6g.4xlarge-arm64
options:
- ubuntu-2204-32-cores-arm
- ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G
- ec2-c6g.4xlarge-arm64 # 16C32G
@@ -98,16 +99,6 @@ permissions:
contents: write # Allows the action to create a release.
jobs:
check-builder-rust-version:
name: Check rust version in builder
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- name: Check Rust toolchain version
shell: bash
run: |
./scripts/check-builder-rust-version.sh
allocate-runners:
name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
@@ -132,6 +123,11 @@ jobs:
with:
fetch-depth: 0
- name: Check Rust toolchain version
shell: bash
run: |
./scripts/check-builder-rust-version.sh
# The create-version will create a global variable named 'version' in the global workflows.
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
@@ -256,7 +252,8 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
features: ${{ matrix.features }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
# We decide to disable the integration tests on macOS because it's unnecessary and time-consuming.
disable-run-tests: true
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
- name: Set build macos result

Cargo.lock (generated)
View File

@@ -214,7 +214,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
[[package]]
name = "api"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"common-base",
"common-decimal",
@@ -762,7 +762,7 @@ dependencies = [
[[package]]
name = "auth"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -1286,7 +1286,7 @@ dependencies = [
[[package]]
name = "cache"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"catalog",
"common-error",
@@ -1294,7 +1294,7 @@ dependencies = [
"common-meta",
"moka",
"snafu 0.8.4",
"substrait 0.9.2",
"substrait 0.9.3",
]
[[package]]
@@ -1321,7 +1321,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "catalog"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arrow",
@@ -1647,7 +1647,7 @@ checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70"
[[package]]
name = "client"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arc-swap",
@@ -1677,7 +1677,7 @@ dependencies = [
"serde_json",
"snafu 0.8.4",
"substrait 0.37.3",
"substrait 0.9.2",
"substrait 0.9.3",
"tokio",
"tokio-stream",
"tonic 0.11.0",
@@ -1707,7 +1707,7 @@ dependencies = [
[[package]]
name = "cmd"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-trait",
"auth",
@@ -1763,7 +1763,7 @@ dependencies = [
"session",
"snafu 0.8.4",
"store-api",
"substrait 0.9.2",
"substrait 0.9.3",
"table",
"temp-env",
"tempfile",
@@ -1809,7 +1809,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]]
name = "common-base"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"anymap",
"bitvec",
@@ -1825,7 +1825,7 @@ dependencies = [
[[package]]
name = "common-catalog"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"chrono",
"common-error",
@@ -1836,7 +1836,7 @@ dependencies = [
[[package]]
name = "common-config"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"common-base",
"common-error",
@@ -1859,7 +1859,7 @@ dependencies = [
[[package]]
name = "common-datasource"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"arrow",
"arrow-schema",
@@ -1896,7 +1896,7 @@ dependencies = [
[[package]]
name = "common-decimal"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"bigdecimal",
"common-error",
@@ -1909,7 +1909,7 @@ dependencies = [
[[package]]
name = "common-error"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"snafu 0.8.4",
"strum 0.25.0",
@@ -1918,7 +1918,7 @@ dependencies = [
[[package]]
name = "common-frontend"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -1933,7 +1933,7 @@ dependencies = [
[[package]]
name = "common-function"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arc-swap",
@@ -1950,6 +1950,8 @@ dependencies = [
"common-version",
"datafusion",
"datatypes",
"geohash",
"h3o",
"num",
"num-traits",
"once_cell",
@@ -1968,7 +1970,7 @@ dependencies = [
[[package]]
name = "common-greptimedb-telemetry"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-trait",
"common-runtime",
@@ -1985,7 +1987,7 @@ dependencies = [
[[package]]
name = "common-grpc"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arrow-flight",
@@ -2011,7 +2013,7 @@ dependencies = [
[[package]]
name = "common-grpc-expr"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"common-base",
@@ -2029,7 +2031,7 @@ dependencies = [
[[package]]
name = "common-macro"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"arc-swap",
"common-query",
@@ -2043,7 +2045,7 @@ dependencies = [
[[package]]
name = "common-mem-prof"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"common-error",
"common-macro",
@@ -2056,7 +2058,7 @@ dependencies = [
[[package]]
name = "common-meta"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"anymap2",
"api",
@@ -2112,11 +2114,11 @@ dependencies = [
[[package]]
name = "common-plugins"
version = "0.9.2"
version = "0.9.3"
[[package]]
name = "common-procedure"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-stream",
"async-trait",
@@ -2142,7 +2144,7 @@ dependencies = [
[[package]]
name = "common-procedure-test"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-trait",
"common-procedure",
@@ -2150,7 +2152,7 @@ dependencies = [
[[package]]
name = "common-query"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -2176,7 +2178,7 @@ dependencies = [
[[package]]
name = "common-recordbatch"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"arc-swap",
"common-error",
@@ -2195,7 +2197,7 @@ dependencies = [
[[package]]
name = "common-runtime"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-trait",
"common-error",
@@ -2217,7 +2219,7 @@ dependencies = [
[[package]]
name = "common-telemetry"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"atty",
"backtrace",
@@ -2244,7 +2246,7 @@ dependencies = [
[[package]]
name = "common-test-util"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"client",
"common-query",
@@ -2256,7 +2258,7 @@ dependencies = [
[[package]]
name = "common-time"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"arrow",
"chrono",
@@ -2272,7 +2274,7 @@ dependencies = [
[[package]]
name = "common-version"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"build-data",
"const_format",
@@ -2283,7 +2285,7 @@ dependencies = [
[[package]]
name = "common-wal"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"common-base",
"common-error",
@@ -3091,7 +3093,7 @@ dependencies = [
[[package]]
name = "datanode"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arrow-flight",
@@ -3140,7 +3142,7 @@ dependencies = [
"session",
"snafu 0.8.4",
"store-api",
"substrait 0.9.2",
"substrait 0.9.3",
"table",
"tokio",
"toml 0.8.14",
@@ -3149,11 +3151,12 @@ dependencies = [
[[package]]
name = "datatypes"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"arrow",
"arrow-array",
"arrow-schema",
"base64 0.21.7",
"common-base",
"common-decimal",
"common-error",
@@ -3162,6 +3165,7 @@ dependencies = [
"common-time",
"datafusion-common",
"enum_dispatch",
"greptime-proto",
"num",
"num-traits",
"ordered-float 3.9.2",
@@ -3719,7 +3723,7 @@ dependencies = [
[[package]]
name = "file-engine"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -3813,9 +3817,15 @@ dependencies = [
"num-traits",
]
[[package]]
name = "float_eq"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28a80e3145d8ad11ba0995949bbcf48b9df2be62772b3d351ef017dff6ecb853"
[[package]]
name = "flow"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arrow",
@@ -3872,7 +3882,7 @@ dependencies = [
"snafu 0.8.4",
"store-api",
"strum 0.25.0",
"substrait 0.9.2",
"substrait 0.9.3",
"table",
"tokio",
"tonic 0.11.0",
@@ -3919,7 +3929,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
[[package]]
name = "frontend"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arc-swap",
@@ -4211,6 +4221,27 @@ dependencies = [
"version_check",
]
[[package]]
name = "geo-types"
version = "0.7.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ff16065e5720f376fbced200a5ae0f47ace85fd70b7e54269790281353b6d61"
dependencies = [
"approx",
"num-traits",
"serde",
]
[[package]]
name = "geohash"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fb94b1a65401d6cbf22958a9040aa364812c26674f841bee538b12c135db1e6"
dependencies = [
"geo-types",
"libm",
]
[[package]]
name = "gethostname"
version = "0.2.3"
@@ -4271,7 +4302,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=c437b55725b7f5224fe9d46db21072b4a682ee4b#c437b55725b7f5224fe9d46db21072b4a682ee4b"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=157cfdb52709e489cf1f3ce8e3042ed4ee8a524a#157cfdb52709e489cf1f3ce8e3042ed4ee8a524a"
dependencies = [
"prost 0.12.6",
"serde",
@@ -4301,6 +4332,25 @@ dependencies = [
"tracing",
]
[[package]]
name = "h3o"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0de3592e1f699692aa0525c42ff7879ec3ee7e36329af20967bc910a1cdc39c7"
dependencies = [
"ahash 0.8.11",
"either",
"float_eq",
"h3o-bit",
"libm",
]
[[package]]
name = "h3o-bit"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fb45e8060378c0353781abf67e1917b545a6b710d0342d85b70c125af7ef320"
[[package]]
name = "half"
version = "1.8.3"
@@ -4717,7 +4767,7 @@ dependencies = [
"httpdate",
"itoa",
"pin-project-lite",
"socket2 0.4.10",
"socket2 0.5.7",
"tokio",
"tower-service",
"tracing",
@@ -5030,7 +5080,7 @@ dependencies = [
[[package]]
name = "index"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-trait",
"asynchronous-codec",
@@ -5810,7 +5860,7 @@ checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
[[package]]
name = "log-store"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-stream",
"async-trait",
@@ -6122,7 +6172,7 @@ dependencies = [
[[package]]
name = "meta-client"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -6148,7 +6198,7 @@ dependencies = [
[[package]]
name = "meta-srv"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -6226,7 +6276,7 @@ dependencies = [
[[package]]
name = "metric-engine"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"aquamarine",
@@ -6317,7 +6367,7 @@ dependencies = [
[[package]]
name = "mito2"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"aquamarine",
@@ -6964,7 +7014,7 @@ dependencies = [
[[package]]
name = "object-store"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"anyhow",
"bytes",
@@ -7211,7 +7261,7 @@ dependencies = [
[[package]]
name = "operator"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -7256,7 +7306,7 @@ dependencies = [
"sql",
"sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
"store-api",
"substrait 0.9.2",
"substrait 0.9.3",
"table",
"tokio",
"tokio-util",
@@ -7506,7 +7556,7 @@ dependencies = [
[[package]]
name = "partition"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -7795,7 +7845,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "pipeline"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"ahash 0.8.11",
"api",
@@ -7956,7 +8006,7 @@ dependencies = [
[[package]]
name = "plugins"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"auth",
"common-base",
@@ -8225,7 +8275,7 @@ dependencies = [
[[package]]
name = "promql"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"ahash 0.8.11",
"async-trait",
@@ -8460,7 +8510,7 @@ dependencies = [
[[package]]
name = "puffin"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-compression 0.4.11",
"async-trait",
@@ -8512,7 +8562,7 @@ dependencies = [
"indoc",
"libc",
"memoffset 0.9.1",
"parking_lot 0.11.2",
"parking_lot 0.12.3",
"portable-atomic",
"pyo3-build-config",
"pyo3-ffi",
@@ -8582,7 +8632,7 @@ dependencies = [
[[package]]
name = "query"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"ahash 0.8.11",
"api",
@@ -8645,7 +8695,7 @@ dependencies = [
"stats-cli",
"store-api",
"streaming-stats",
"substrait 0.9.2",
"substrait 0.9.3",
"table",
"tokio",
"tokio-stream",
@@ -10007,7 +10057,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "script"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arc-swap",
@@ -10301,7 +10351,7 @@ dependencies = [
[[package]]
name = "servers"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"aide",
"api",
@@ -10407,7 +10457,7 @@ dependencies = [
[[package]]
name = "session"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arc-swap",
@@ -10708,7 +10758,7 @@ dependencies = [
[[package]]
name = "sql"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"chrono",
@@ -10768,7 +10818,7 @@ dependencies = [
[[package]]
name = "sqlness-runner"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-trait",
"clap 4.5.7",
@@ -10985,7 +11035,7 @@ dependencies = [
[[package]]
name = "store-api"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"aquamarine",
@@ -11154,7 +11204,7 @@ dependencies = [
[[package]]
name = "substrait"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"async-trait",
"bytes",
@@ -11355,7 +11405,7 @@ dependencies = [
[[package]]
name = "table"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"async-trait",
@@ -11620,7 +11670,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
[[package]]
name = "tests-fuzz"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"arbitrary",
"async-trait",
@@ -11662,7 +11712,7 @@ dependencies = [
[[package]]
name = "tests-integration"
version = "0.9.2"
version = "0.9.3"
dependencies = [
"api",
"arrow-flight",
@@ -11722,7 +11772,7 @@ dependencies = [
"sql",
"sqlx",
"store-api",
"substrait 0.9.2",
"substrait 0.9.3",
"table",
"tempfile",
"time",
@@ -12441,6 +12491,16 @@ dependencies = [
"web-time 0.2.4",
]
[[package]]
name = "tracing-serde"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1"
dependencies = [
"serde",
"tracing-core",
]
[[package]]
name = "tracing-subscriber"
version = "0.3.18"
@@ -12451,12 +12511,15 @@ dependencies = [
"nu-ansi-term",
"once_cell",
"regex",
"serde",
"serde_json",
"sharded-slab",
"smallvec",
"thread_local",
"tracing",
"tracing-core",
"tracing-log 0.2.0",
"tracing-serde",
]
[[package]]

View File

@@ -64,7 +64,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.9.2"
version = "0.9.3"
edition = "2021"
license = "Apache-2.0"
@@ -120,7 +120,7 @@ etcd-client = { version = "0.13" }
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "c437b55725b7f5224fe9d46db21072b4a682ee4b" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "157cfdb52709e489cf1f3ce8e3042ed4ee8a524a" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"

View File

@@ -191,6 +191,7 @@ fix-clippy: ## Fix clippy violations.
.PHONY: fmt-check
fmt-check: ## Check code format.
cargo fmt --all -- --check
python3 scripts/check-snafu.py
.PHONY: start-etcd
start-etcd: ## Start single node etcd for testing purpose.

View File

@@ -74,7 +74,7 @@ Our core developers have been building time-series data platforms for years. Bas
* **Compatible with InfluxDB, Prometheus and more protocols**
Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc. [Read more](https://docs.greptime.com/user-guide/clients/overview).
Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc. [Read more](https://docs.greptime.com/user-guide/protocols/overview).
## Try GreptimeDB

View File

@@ -15,6 +15,8 @@
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `default_timezone` | String | `None` | The default timezone of the server. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
@@ -127,6 +129,7 @@
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
@@ -150,11 +153,12 @@
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
@@ -235,11 +239,12 @@
| `datanode.client.connect_timeout` | String | `10s` | -- |
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
@@ -299,11 +304,12 @@
| `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
| `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
@@ -426,6 +432,7 @@
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
@@ -447,11 +454,12 @@
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
@@ -492,11 +500,12 @@
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |

View File

@@ -336,7 +336,7 @@ credential_path = "test"
## The credential of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## +toml2docs:none-default
credential= "base64-credential"
credential = "base64-credential"
## The container of the azure account.
## **It's only used when the storage type is `Azblob`**.
@@ -360,9 +360,23 @@ region = "us-west-2"
# Custom storage options
# [[storage.providers]]
# name = "S3"
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# [[storage.providers]]
# name = "Gcs"
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The region engine options. You can configure multiple region engines.
[[region_engine]]
@@ -441,6 +455,10 @@ parallel_scan_channel_size = 32
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
## The options for index in Mito engine.
[region_engine.mito.index]
@@ -531,7 +549,7 @@ fork_dictionary_bytes = "1GiB"
## The logging options.
[logging]
## The directory to store the log files.
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
@@ -547,6 +565,9 @@ otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0

View File

@@ -59,7 +59,7 @@ retry_interval = "3s"
## The logging options.
[logging]
## The directory to store the log files.
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
@@ -75,6 +75,9 @@ otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0

View File

@@ -166,7 +166,7 @@ tcp_nodelay = true
## The logging options.
[logging]
## The directory to store the log files.
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
@@ -182,6 +182,9 @@ otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0

View File

@@ -153,7 +153,7 @@ backoff_deadline = "5mins"
## The logging options.
[logging]
## The directory to store the log files.
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
@@ -169,6 +169,9 @@ otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0

View File

@@ -8,6 +8,13 @@ enable_telemetry = true
## +toml2docs:none-default
default_timezone = "UTC"
## Initialize all regions in the background during the startup.
## By default, it provides services after all regions have been initialized.
init_regions_in_background = false
## Parallelism of initializing regions.
init_regions_parallelism = 16
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
@@ -391,9 +398,23 @@ region = "us-west-2"
# Custom storage options
# [[storage.providers]]
# name = "S3"
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# [[storage.providers]]
# name = "Gcs"
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The region engine options. You can configure multiple region engines.
[[region_engine]]
@@ -472,6 +493,10 @@ parallel_scan_channel_size = 32
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
## The options for index in Mito engine.
[region_engine.mito.index]
@@ -568,7 +593,7 @@ fork_dictionary_bytes = "1GiB"
## The logging options.
[logging]
## The directory to store the log files.
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
@@ -584,6 +609,9 @@ otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0

scripts/check-snafu.py (new file)
View File

@@ -0,0 +1,69 @@
# Copyright 2023 Greptime Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
def find_rust_files(directory):
error_files = []
other_rust_files = []
for root, _, files in os.walk(directory):
for file in files:
if file == "error.rs":
error_files.append(os.path.join(root, file))
elif file.endswith(".rs"):
other_rust_files.append(os.path.join(root, file))
return error_files, other_rust_files
def extract_branch_names(file_content):
pattern = re.compile(r"#\[snafu\(display\([^\)]*\)\)\]\s*(\w+)\s*\{")
return pattern.findall(file_content)
def check_snafu_in_files(branch_name, rust_files):
branch_name_snafu = f"{branch_name}Snafu"
for rust_file in rust_files:
with open(rust_file, "r") as file:
content = file.read()
if branch_name_snafu in content:
return True
return False
def main():
error_files, other_rust_files = find_rust_files(".")
branch_names = []
for error_file in error_files:
with open(error_file, "r") as file:
content = file.read()
branch_names.extend(extract_branch_names(content))
unused_snafu = [
branch_name
for branch_name in branch_names
if not check_snafu_in_files(branch_name, other_rust_files)
]
for name in unused_snafu:
print(name)
if unused_snafu:
raise SystemExit(1)
if __name__ == "__main__":
main()
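For reference, the pattern the script is looking for: each #[snafu(display(...))] variant generates a <Variant>Snafu context selector, and a variant only counts as used if that selector appears somewhere outside error.rs. A minimal, hypothetical error type (not taken from the codebase):

// The script extracts "ReadConfig" from the display attribute below and then greps
// the rest of the crate for "ReadConfigSnafu".
use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to read config file {}", path))]
    ReadConfig {
        path: String,
        source: std::io::Error,
    },
}

fn load_config(path: &str) -> Result<String, Error> {
    // Referencing the generated selector here is what keeps the variant "alive".
    std::fs::read_to_string(path).context(ReadConfigSnafu { path })
}

fn main() {
    if let Err(err) = load_config("/nonexistent/config.toml") {
        eprintln!("{err}");
    }
}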

View File

@@ -21,14 +21,14 @@ use greptime_proto::v1::region::RegionResponse as RegionResponseV1;
#[derive(Debug)]
pub struct RegionResponse {
pub affected_rows: AffectedRows,
pub extension: HashMap<String, Vec<u8>>,
pub extensions: HashMap<String, Vec<u8>>,
}
impl RegionResponse {
pub fn from_region_response(region_response: RegionResponseV1) -> Self {
Self {
affected_rows: region_response.affected_rows as _,
extension: region_response.extension,
extensions: region_response.extensions,
}
}
@@ -36,7 +36,7 @@ impl RegionResponse {
pub fn new(affected_rows: AffectedRows) -> Self {
Self {
affected_rows,
extension: Default::default(),
extensions: Default::default(),
}
}
}

View File

@@ -13,9 +13,11 @@
// limitations under the License.
use common_base::secrets::ExposeSecret;
use common_error::ext::BoxedError;
use snafu::{OptionExt, ResultExt};
use crate::error::{
AccessDeniedSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
AccessDeniedSnafu, AuthBackendSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
UserPasswordMismatchSnafu,
};
use crate::user_info::DefaultUserInfo;
@@ -49,6 +51,19 @@ impl MockUserProvider {
info.schema.clone_into(&mut self.schema);
info.username.clone_into(&mut self.username);
}
// this is a deliberate function to ref AuthBackendSnafu
// so that it won't get deleted in the future
pub fn ref_auth_backend_snafu(&self) -> Result<()> {
let none_option = None;
none_option
.context(UserNotFoundSnafu {
username: "no_user".to_string(),
})
.map_err(BoxedError::new)
.context(AuthBackendSnafu)
}
}
#[async_trait::async_trait]

View File

@@ -18,6 +18,7 @@ use std::sync::Arc;
use api::v1::greptime_request::Request;
use auth::error::Error::InternalState;
use auth::error::InternalStateSnafu;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq, PermissionResp, UserInfoRef};
use sql::statements::show::{ShowDatabases, ShowKind};
use sql::statements::statement::Statement;
@@ -33,9 +34,10 @@ impl PermissionChecker for DummyPermissionChecker {
match req {
PermissionReq::GrpcRequest(_) => Ok(PermissionResp::Allow),
PermissionReq::SqlStatement(_) => Ok(PermissionResp::Reject),
_ => Err(InternalState {
_ => InternalStateSnafu {
msg: "testing".to_string(),
}),
}
.fail(),
}
}
}

View File

@@ -97,13 +97,6 @@ pub enum Error {
source: table::error::Error,
},
#[snafu(display("System catalog is not valid: {}", msg))]
SystemCatalog {
msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Cannot find catalog by name: {}", catalog_name))]
CatalogNotFound {
catalog_name: String,
@@ -186,13 +179,6 @@ pub enum Error {
source: common_query::error::Error,
},
#[snafu(display("Failed to perform metasrv operation"))]
Metasrv {
#[snafu(implicit)]
location: Location,
source: meta_client::error::Error,
},
#[snafu(display("Invalid table info in catalog"))]
InvalidTableInfoInCatalog {
#[snafu(implicit)]
@@ -288,8 +274,6 @@ impl ErrorExt for Error {
Error::FlowInfoNotFound { .. } => StatusCode::FlowNotFound,
Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,
Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
Error::CreateRecordBatch { source, .. } => source.status_code(),
@@ -303,7 +287,6 @@ impl ErrorExt for Error {
Error::CreateTable { source, .. } => source.status_code(),
Error::Metasrv { source, .. } => source.status_code(),
Error::DecodePlan { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
@@ -338,27 +321,6 @@ mod tests {
use super::*;
#[test]
pub fn test_error_status_code() {
assert_eq!(
StatusCode::TableAlreadyExists,
Error::TableExists {
table: "some_table".to_string(),
location: Location::generate(),
}
.status_code()
);
assert_eq!(
StatusCode::StorageUnavailable,
Error::SystemCatalog {
msg: String::default(),
location: Location::generate(),
}
.status_code()
);
}
#[test]
pub fn test_errors_to_datafusion_error() {
let e: DataFusionError = Error::TableExists {

View File

@@ -20,8 +20,8 @@ use std::time::Duration;
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::KvCacheInvalidator;
use common_meta::error::Error::{CacheNotGet, GetKvCache};
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, Result};
use common_meta::error::Error::CacheNotGet;
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
use common_meta::rpc::store::{
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
@@ -282,8 +282,11 @@ impl KvBackend for CachedMetaKvBackend {
_ => Err(e),
},
}
.map_err(|e| GetKvCache {
err_msg: e.to_string(),
.map_err(|e| {
GetKvCacheSnafu {
err_msg: e.to_string(),
}
.build()
});
// "cache.invalidate_key" and "cache.try_get_with_by_ref" are not mutually exclusive. So we need

View File

@@ -36,6 +36,7 @@ use futures_util::{StreamExt, TryStreamExt};
use meta_client::client::MetaClient;
use moka::sync::Cache;
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
use session::context::{Channel, QueryContext};
use snafu::prelude::*;
use table::dist_table::DistTable;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
@@ -152,7 +153,11 @@ impl CatalogManager for KvBackendCatalogManager {
Ok(keys)
}
async fn schema_names(&self, catalog: &str) -> Result<Vec<String>> {
async fn schema_names(
&self,
catalog: &str,
query_ctx: Option<&QueryContext>,
) -> Result<Vec<String>> {
let stream = self
.table_metadata_manager
.schema_manager()
@@ -163,12 +168,17 @@ impl CatalogManager for KvBackendCatalogManager {
.map_err(BoxedError::new)
.context(ListSchemasSnafu { catalog })?;
keys.extend(self.system_catalog.schema_names());
keys.extend(self.system_catalog.schema_names(query_ctx));
Ok(keys.into_iter().collect())
}
async fn table_names(&self, catalog: &str, schema: &str) -> Result<Vec<String>> {
async fn table_names(
&self,
catalog: &str,
schema: &str,
query_ctx: Option<&QueryContext>,
) -> Result<Vec<String>> {
let stream = self
.table_metadata_manager
.table_name_manager()
@@ -181,7 +191,7 @@ impl CatalogManager for KvBackendCatalogManager {
.into_iter()
.map(|(k, _)| k)
.collect::<Vec<_>>();
tables.extend_from_slice(&self.system_catalog.table_names(schema));
tables.extend_from_slice(&self.system_catalog.table_names(schema, query_ctx));
Ok(tables.into_iter().collect())
}
@@ -194,8 +204,13 @@ impl CatalogManager for KvBackendCatalogManager {
.context(TableMetadataManagerSnafu)
}
async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool> {
if self.system_catalog.schema_exists(schema) {
async fn schema_exists(
&self,
catalog: &str,
schema: &str,
query_ctx: Option<&QueryContext>,
) -> Result<bool> {
if self.system_catalog.schema_exists(schema, query_ctx) {
return Ok(true);
}
@@ -206,8 +221,14 @@ impl CatalogManager for KvBackendCatalogManager {
.context(TableMetadataManagerSnafu)
}
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
if self.system_catalog.table_exists(schema, table) {
async fn table_exists(
&self,
catalog: &str,
schema: &str,
table: &str,
query_ctx: Option<&QueryContext>,
) -> Result<bool> {
if self.system_catalog.table_exists(schema, table, query_ctx) {
return Ok(true);
}
@@ -225,10 +246,12 @@ impl CatalogManager for KvBackendCatalogManager {
catalog_name: &str,
schema_name: &str,
table_name: &str,
query_ctx: Option<&QueryContext>,
) -> Result<Option<TableRef>> {
if let Some(table) = self
.system_catalog
.table(catalog_name, schema_name, table_name)
let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
if let Some(table) =
self.system_catalog
.table(catalog_name, schema_name, table_name, query_ctx)
{
return Ok(Some(table));
}
@@ -236,23 +259,45 @@ impl CatalogManager for KvBackendCatalogManager {
let table_cache: TableCacheRef = self.cache_registry.get().context(CacheNotFoundSnafu {
name: "table_cache",
})?;
table_cache
if let Some(table) = table_cache
.get_by_ref(&TableName {
catalog_name: catalog_name.to_string(),
schema_name: schema_name.to_string(),
table_name: table_name.to_string(),
})
.await
.context(GetTableCacheSnafu)
.context(GetTableCacheSnafu)?
{
return Ok(Some(table));
}
if channel == Channel::Postgres {
// fall back to pg_catalog
if let Some(table) =
self.system_catalog
.table(catalog_name, PG_CATALOG_NAME, table_name, query_ctx)
{
return Ok(Some(table));
}
}
return Ok(None);
}
fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>> {
fn tables<'a>(
&'a self,
catalog: &'a str,
schema: &'a str,
query_ctx: Option<&'a QueryContext>,
) -> BoxStream<'a, Result<TableRef>> {
let sys_tables = try_stream!({
// System tables
let sys_table_names = self.system_catalog.table_names(schema);
let sys_table_names = self.system_catalog.table_names(schema, query_ctx);
for table_name in sys_table_names {
if let Some(table) = self.system_catalog.table(catalog, schema, &table_name) {
if let Some(table) =
self.system_catalog
.table(catalog, schema, &table_name, query_ctx)
{
yield table;
}
}
@@ -313,25 +358,34 @@ struct SystemCatalog {
catalog_cache: Cache<String, Arc<InformationSchemaProvider>>,
pg_catalog_cache: Cache<String, Arc<PGCatalogProvider>>,
// system_schema_provier for default catalog
// system_schema_provider for default catalog
information_schema_provider: Arc<InformationSchemaProvider>,
pg_catalog_provider: Arc<PGCatalogProvider>,
backend: KvBackendRef,
}
impl SystemCatalog {
// TODO(j0hn50n133): remove the duplicated hard-coded table names logic
fn schema_names(&self) -> Vec<String> {
vec![
INFORMATION_SCHEMA_NAME.to_string(),
PG_CATALOG_NAME.to_string(),
]
fn schema_names(&self, query_ctx: Option<&QueryContext>) -> Vec<String> {
let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
match channel {
// pg_catalog only visible under postgres protocol
Channel::Postgres => vec![
INFORMATION_SCHEMA_NAME.to_string(),
PG_CATALOG_NAME.to_string(),
],
_ => {
vec![INFORMATION_SCHEMA_NAME.to_string()]
}
}
}
fn table_names(&self, schema: &str) -> Vec<String> {
fn table_names(&self, schema: &str, query_ctx: Option<&QueryContext>) -> Vec<String> {
let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
match schema {
INFORMATION_SCHEMA_NAME => self.information_schema_provider.table_names(),
PG_CATALOG_NAME => self.pg_catalog_provider.table_names(),
PG_CATALOG_NAME if channel == Channel::Postgres => {
self.pg_catalog_provider.table_names()
}
DEFAULT_SCHEMA_NAME => {
vec![NUMBERS_TABLE_NAME.to_string()]
}
@@ -339,23 +393,35 @@ impl SystemCatalog {
}
}
fn schema_exists(&self, schema: &str) -> bool {
schema == INFORMATION_SCHEMA_NAME || schema == PG_CATALOG_NAME
fn schema_exists(&self, schema: &str, query_ctx: Option<&QueryContext>) -> bool {
let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
match channel {
Channel::Postgres => schema == PG_CATALOG_NAME || schema == INFORMATION_SCHEMA_NAME,
_ => schema == INFORMATION_SCHEMA_NAME,
}
}
fn table_exists(&self, schema: &str, table: &str) -> bool {
fn table_exists(&self, schema: &str, table: &str, query_ctx: Option<&QueryContext>) -> bool {
let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
if schema == INFORMATION_SCHEMA_NAME {
self.information_schema_provider.table(table).is_some()
} else if schema == DEFAULT_SCHEMA_NAME {
table == NUMBERS_TABLE_NAME
} else if schema == PG_CATALOG_NAME {
} else if schema == PG_CATALOG_NAME && channel == Channel::Postgres {
self.pg_catalog_provider.table(table).is_some()
} else {
false
}
}
fn table(&self, catalog: &str, schema: &str, table_name: &str) -> Option<TableRef> {
fn table(
&self,
catalog: &str,
schema: &str,
table_name: &str,
query_ctx: Option<&QueryContext>,
) -> Option<TableRef> {
let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
if schema == INFORMATION_SCHEMA_NAME {
let information_schema_provider =
self.catalog_cache.get_with_by_ref(catalog, move || {
@@ -366,7 +432,7 @@ impl SystemCatalog {
))
});
information_schema_provider.table(table_name)
} else if schema == PG_CATALOG_NAME {
} else if schema == PG_CATALOG_NAME && channel == Channel::Postgres {
if catalog == DEFAULT_CATALOG_NAME {
self.pg_catalog_provider.table(table_name)
} else {


@@ -20,8 +20,10 @@ use std::fmt::{Debug, Formatter};
use std::sync::Arc;
use api::v1::CreateTableExpr;
use common_catalog::consts::{INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME};
use futures::future::BoxFuture;
use futures_util::stream::BoxStream;
use session::context::QueryContext;
use table::metadata::TableId;
use table::TableRef;
@@ -44,15 +46,35 @@ pub trait CatalogManager: Send + Sync {
async fn catalog_names(&self) -> Result<Vec<String>>;
async fn schema_names(&self, catalog: &str) -> Result<Vec<String>>;
async fn schema_names(
&self,
catalog: &str,
query_ctx: Option<&QueryContext>,
) -> Result<Vec<String>>;
async fn table_names(&self, catalog: &str, schema: &str) -> Result<Vec<String>>;
async fn table_names(
&self,
catalog: &str,
schema: &str,
query_ctx: Option<&QueryContext>,
) -> Result<Vec<String>>;
async fn catalog_exists(&self, catalog: &str) -> Result<bool>;
async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool>;
async fn schema_exists(
&self,
catalog: &str,
schema: &str,
query_ctx: Option<&QueryContext>,
) -> Result<bool>;
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool>;
async fn table_exists(
&self,
catalog: &str,
schema: &str,
table: &str,
query_ctx: Option<&QueryContext>,
) -> Result<bool>;
/// Returns the table by catalog, schema and table name.
async fn table(
@@ -60,10 +82,25 @@ pub trait CatalogManager: Send + Sync {
catalog: &str,
schema: &str,
table_name: &str,
query_ctx: Option<&QueryContext>,
) -> Result<Option<TableRef>>;
/// Returns all tables with a stream by catalog and schema.
fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>>;
fn tables<'a>(
&'a self,
catalog: &'a str,
schema: &'a str,
query_ctx: Option<&'a QueryContext>,
) -> BoxStream<'a, Result<TableRef>>;
/// Check if `schema` is a reserved schema name
fn is_reserved_schema_name(&self, schema: &str) -> bool {
// We have to check whether a schema name is reserved before creating a schema.
// We need this rather than relying on schema_exists directly because `pg_catalog` is
// only visible via the Postgres protocol, so without this check a MySQL client could
// create a schema named `pg_catalog` that conflicts with the built-in one.
schema == INFORMATION_SCHEMA_NAME || schema == PG_CATALOG_NAME
}
}
pub type CatalogManagerRef = Arc<dyn CatalogManager>;
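For orientation, a minimal sketch (not code from this change) of how a caller such as a CREATE DATABASE handler could rely on `is_reserved_schema_name`; the helper name and the plain string error are assumptions for illustration only.
// Hypothetical helper: rejects reserved schema names (information_schema,
// pg_catalog) regardless of which protocol issued the request.
fn check_schema_creatable(
    catalog_manager: &dyn CatalogManager,
    schema: &str,
) -> std::result::Result<(), String> {
    if catalog_manager.is_reserved_schema_name(schema) {
        return Err(format!("schema name `{schema}` is reserved"));
    }
    Ok(())
}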


@@ -26,6 +26,7 @@ use common_catalog::consts::{
use common_meta::key::flow::FlowMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
use futures_util::stream::BoxStream;
use session::context::QueryContext;
use snafu::OptionExt;
use table::TableRef;
@@ -53,7 +54,11 @@ impl CatalogManager for MemoryCatalogManager {
Ok(self.catalogs.read().unwrap().keys().cloned().collect())
}
async fn schema_names(&self, catalog: &str) -> Result<Vec<String>> {
async fn schema_names(
&self,
catalog: &str,
_query_ctx: Option<&QueryContext>,
) -> Result<Vec<String>> {
Ok(self
.catalogs
.read()
@@ -67,7 +72,12 @@ impl CatalogManager for MemoryCatalogManager {
.collect())
}
async fn table_names(&self, catalog: &str, schema: &str) -> Result<Vec<String>> {
async fn table_names(
&self,
catalog: &str,
schema: &str,
_query_ctx: Option<&QueryContext>,
) -> Result<Vec<String>> {
Ok(self
.catalogs
.read()
@@ -87,11 +97,22 @@ impl CatalogManager for MemoryCatalogManager {
self.catalog_exist_sync(catalog)
}
async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool> {
async fn schema_exists(
&self,
catalog: &str,
schema: &str,
_query_ctx: Option<&QueryContext>,
) -> Result<bool> {
self.schema_exist_sync(catalog, schema)
}
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
async fn table_exists(
&self,
catalog: &str,
schema: &str,
table: &str,
_query_ctx: Option<&QueryContext>,
) -> Result<bool> {
let catalogs = self.catalogs.read().unwrap();
Ok(catalogs
.get(catalog)
@@ -108,6 +129,7 @@ impl CatalogManager for MemoryCatalogManager {
catalog: &str,
schema: &str,
table_name: &str,
_query_ctx: Option<&QueryContext>,
) -> Result<Option<TableRef>> {
let result = try {
self.catalogs
@@ -121,7 +143,12 @@ impl CatalogManager for MemoryCatalogManager {
Ok(result)
}
fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>> {
fn tables<'a>(
&'a self,
catalog: &'a str,
schema: &'a str,
_query_ctx: Option<&QueryContext>,
) -> BoxStream<'a, Result<TableRef>> {
let catalogs = self.catalogs.read().unwrap();
let Some(schemas) = catalogs.get(catalog) else {
@@ -371,11 +398,12 @@ mod tests {
DEFAULT_CATALOG_NAME,
DEFAULT_SCHEMA_NAME,
NUMBERS_TABLE_NAME,
None,
)
.await
.unwrap()
.unwrap();
let stream = catalog_list.tables(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME);
let stream = catalog_list.tables(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, None);
let tables = stream.try_collect::<Vec<_>>().await.unwrap();
assert_eq!(tables.len(), 1);
assert_eq!(
@@ -384,7 +412,12 @@ mod tests {
);
assert!(catalog_list
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "not_exists")
.table(
DEFAULT_CATALOG_NAME,
DEFAULT_SCHEMA_NAME,
"not_exists",
None
)
.await
.unwrap()
.is_none());
@@ -411,7 +444,7 @@ mod tests {
};
catalog.register_table_sync(register_table_req).unwrap();
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
.await
.unwrap()
.is_some());
@@ -423,7 +456,7 @@ mod tests {
};
catalog.deregister_table_sync(deregister_table_req).unwrap();
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
.await
.unwrap()
.is_none());


@@ -257,8 +257,8 @@ impl InformationSchemaColumnsBuilder {
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
while let Some(table) = stream.try_next().await? {
let keys = &table.table_info().meta.primary_key_indices;


@@ -212,8 +212,8 @@ impl InformationSchemaKeyColumnUsageBuilder {
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
while let Some(table) = stream.try_next().await? {
let mut primary_constraints = vec![];


@@ -240,9 +240,9 @@ impl InformationSchemaPartitionsBuilder {
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
let table_info_stream = catalog_manager
.tables(&catalog_name, &schema_name)
.tables(&catalog_name, &schema_name, None)
.try_filter_map(|t| async move {
let table_info = t.table_info();
if table_info.table_type == TableType::Temporary {


@@ -176,9 +176,9 @@ impl InformationSchemaRegionPeersBuilder {
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
let table_id_stream = catalog_manager
.tables(&catalog_name, &schema_name)
.tables(&catalog_name, &schema_name, None)
.try_filter_map(|t| async move {
let table_info = t.table_info();
if table_info.table_type == TableType::Temporary {


@@ -171,7 +171,7 @@ impl InformationSchemaSchemataBuilder {
let table_metadata_manager = utils::table_meta_manager(&self.catalog_manager)?;
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
let opts = if let Some(table_metadata_manager) = &table_metadata_manager {
table_metadata_manager
.schema_manager()


@@ -176,8 +176,8 @@ impl InformationSchemaTableConstraintsBuilder {
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
while let Some(table) = stream.try_next().await? {
let keys = &table.table_info().meta.primary_key_indices;


@@ -234,8 +234,8 @@ impl InformationSchemaTablesBuilder {
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
while let Some(table) = stream.try_next().await? {
let table_info = table.table_info();


@@ -192,8 +192,8 @@ impl InformationSchemaViewsBuilder {
.context(CastManagerSnafu)?
.view_info_cache()?;
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
while let Some(table) = stream.try_next().await? {
let table_info = table.table_info();


@@ -18,15 +18,16 @@ mod pg_namespace;
mod table_names;
use std::collections::HashMap;
use std::sync::{Arc, Weak};
use std::sync::{Arc, LazyLock, Weak};
use common_catalog::consts::{self, PG_CATALOG_NAME};
use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, PG_CATALOG_NAME};
use datatypes::schema::ColumnSchema;
use lazy_static::lazy_static;
use paste::paste;
use pg_catalog_memory_table::get_schema_columns;
use pg_class::PGClass;
use pg_namespace::PGNamespace;
use session::context::{Channel, QueryContext};
use table::TableRef;
pub use table_names::*;
@@ -142,3 +143,12 @@ impl SystemSchemaProviderInner for PGCatalogProvider {
&self.catalog_name
}
}
/// Provide query context to call the [`CatalogManager`]'s method.
static PG_QUERY_CTX: LazyLock<QueryContext> = LazyLock::new(|| {
QueryContext::with_channel(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, Channel::Postgres)
});
fn query_ctx() -> Option<&'static QueryContext> {
Some(&PG_QUERY_CTX)
}
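A hedged sketch (the async wrapper and catalog name are assumptions, not code from this change) of what the Postgres-channel context buys: the same `schema_names` call reports `pg_catalog` when given `query_ctx()`, and hides it when given `None`.
// Illustration only; assumes `CatalogManagerRef` is imported from the catalog crate
// and `manager` was built elsewhere.
async fn demo(manager: CatalogManagerRef) {
    let with_pg = manager.schema_names("greptime", query_ctx()).await.unwrap();
    assert!(with_pg.iter().any(|s| s.as_str() == "pg_catalog"));

    // Without a Postgres channel only information_schema (plus user schemas) is visible.
    let without_pg = manager.schema_names("greptime", None).await.unwrap();
    assert!(!without_pg.iter().any(|s| s.as_str() == "pg_catalog"));
}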


@@ -32,7 +32,7 @@ use store_api::storage::ScanRequest;
use table::metadata::TableType;
use super::pg_namespace::oid_map::PGNamespaceOidMapRef;
use super::{OID_COLUMN_NAME, PG_CLASS};
use super::{query_ctx, OID_COLUMN_NAME, PG_CLASS};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
@@ -202,8 +202,11 @@ impl PGClassBuilder {
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
for schema_name in catalog_manager
.schema_names(&catalog_name, query_ctx())
.await?
{
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, query_ctx());
while let Some(table) = stream.try_next().await? {
let table_info = table.table_info();
self.add_class(


@@ -31,7 +31,7 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
use snafu::{OptionExt, ResultExt};
use store_api::storage::ScanRequest;
use super::{PGNamespaceOidMapRef, OID_COLUMN_NAME, PG_NAMESPACE};
use super::{query_ctx, PGNamespaceOidMapRef, OID_COLUMN_NAME, PG_NAMESPACE};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
@@ -180,7 +180,10 @@ impl PGNamespaceBuilder {
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
for schema_name in catalog_manager
.schema_names(&catalog_name, query_ctx())
.await?
{
self.add_namespace(&predicates, &schema_name);
}
self.finish()


@@ -23,7 +23,7 @@ use datafusion::datasource::view::ViewTable;
use datafusion::datasource::{provider_as_source, TableProvider};
use datafusion::logical_expr::TableSource;
use itertools::Itertools;
use session::context::QueryContext;
use session::context::QueryContextRef;
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableType;
use table::table::adapter::DfTableProviderAdapter;
@@ -45,6 +45,7 @@ pub struct DfTableSourceProvider {
disallow_cross_catalog_query: bool,
default_catalog: String,
default_schema: String,
query_ctx: QueryContextRef,
plan_decoder: SubstraitPlanDecoderRef,
enable_ident_normalization: bool,
}
@@ -53,7 +54,7 @@ impl DfTableSourceProvider {
pub fn new(
catalog_manager: CatalogManagerRef,
disallow_cross_catalog_query: bool,
query_ctx: &QueryContext,
query_ctx: QueryContextRef,
plan_decoder: SubstraitPlanDecoderRef,
enable_ident_normalization: bool,
) -> Self {
@@ -63,6 +64,7 @@ impl DfTableSourceProvider {
resolved_tables: HashMap::new(),
default_catalog: query_ctx.current_catalog().to_owned(),
default_schema: query_ctx.current_schema(),
query_ctx,
plan_decoder,
enable_ident_normalization,
}
@@ -71,8 +73,7 @@ impl DfTableSourceProvider {
pub fn resolve_table_ref(&self, table_ref: TableReference) -> Result<ResolvedTableReference> {
if self.disallow_cross_catalog_query {
match &table_ref {
TableReference::Bare { .. } => (),
TableReference::Partial { .. } => {}
TableReference::Bare { .. } | TableReference::Partial { .. } => {}
TableReference::Full {
catalog, schema, ..
} => {
@@ -107,7 +108,7 @@ impl DfTableSourceProvider {
let table = self
.catalog_manager
.table(catalog_name, schema_name, table_name)
.table(catalog_name, schema_name, table_name, Some(&self.query_ctx))
.await?
.with_context(|| TableNotExistSnafu {
table: format_full_table_name(catalog_name, schema_name, table_name),
@@ -210,12 +211,12 @@ mod tests {
#[test]
fn test_validate_table_ref() {
let query_ctx = &QueryContext::with("greptime", "public");
let query_ctx = Arc::new(QueryContext::with("greptime", "public"));
let table_provider = DfTableSourceProvider::new(
MemoryCatalogManager::with_default_setup(),
true,
query_ctx,
query_ctx.clone(),
DummyDecoder::arc(),
true,
);
@@ -308,7 +309,7 @@ mod tests {
#[tokio::test]
async fn test_resolve_view() {
let query_ctx = &QueryContext::with("greptime", "public");
let query_ctx = Arc::new(QueryContext::with("greptime", "public"));
let backend = Arc::new(MemoryKvBackend::default());
let layered_cache_builder = LayeredCacheRegistryBuilder::default()
.add_cache_registry(CacheRegistryBuilder::default().build());
@@ -344,8 +345,13 @@ mod tests {
.await
.unwrap();
let mut table_provider =
DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc(), true);
let mut table_provider = DfTableSourceProvider::new(
catalog_manager,
true,
query_ctx.clone(),
MockDecoder::arc(),
true,
);
// View not found
let table_ref = TableReference::bare("not_exists_view");


@@ -112,7 +112,7 @@ impl SchemaProvider for DummySchemaProvider {
async fn table(&self, name: &str) -> datafusion::error::Result<Option<Arc<dyn TableProvider>>> {
let table = self
.catalog_manager
.table(&self.catalog_name, &self.schema_name, name)
.table(&self.catalog_name, &self.schema_name, name, None)
.await?
.with_context(|| TableNotExistSnafu {
table: format_full_table_name(&self.catalog_name, &self.schema_name, name),


@@ -37,7 +37,8 @@ use tonic::metadata::AsciiMetadataKey;
use tonic::transport::Channel;
use crate::error::{
ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, InvalidAsciiSnafu, ServerSnafu,
ConvertFlightDataSnafu, Error, FlightGetSnafu, IllegalFlightMessagesSnafu, InvalidAsciiSnafu,
ServerSnafu,
};
use crate::{from_grpc_response, Client, Result};
@@ -225,16 +226,18 @@ impl Database {
let mut client = self.client.make_flight_client()?;
let response = client.mut_inner().do_get(request).await.map_err(|e| {
let response = client.mut_inner().do_get(request).await.or_else(|e| {
let tonic_code = e.code();
let e: Error = e.into();
let code = e.status_code();
let msg = e.to_string();
let error = Error::FlightGet {
tonic_code,
addr: client.addr().to_string(),
source: BoxedError::new(ServerSnafu { code, msg }.build()),
};
let error =
Err(BoxedError::new(ServerSnafu { code, msg }.build())).with_context(|_| {
FlightGetSnafu {
addr: client.addr().to_string(),
tonic_code,
}
});
error!(
"Failed to do Flight get, addr: {}, code: {}, source: {:?}",
client.addr(),


@@ -39,13 +39,6 @@ pub enum Error {
source: BoxedError,
},
#[snafu(display("Failure occurs during handling request"))]
HandleRequest {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to convert FlightData"))]
ConvertFlightData {
#[snafu(implicit)]
@@ -116,13 +109,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to send request with streaming: {}", err_msg))]
ClientStreaming {
err_msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to parse ascii string: {}", value))]
InvalidAscii {
value: String,
@@ -138,12 +124,10 @@ impl ErrorExt for Error {
match self {
Error::IllegalFlightMessages { .. }
| Error::MissingField { .. }
| Error::IllegalDatabaseResponse { .. }
| Error::ClientStreaming { .. } => StatusCode::Internal,
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,
Error::Server { code, .. } => *code,
Error::FlightGet { source, .. }
| Error::HandleRequest { source, .. }
| Error::RegionServer { source, .. }
| Error::FlowServer { source, .. } => source.status_code(),
Error::CreateChannel { source, .. }


@@ -16,9 +16,9 @@ use api::v1::flow::{FlowRequest, FlowResponse};
use api::v1::region::InsertRequests;
use common_error::ext::BoxedError;
use common_meta::node_manager::Flownode;
use snafu::{location, ResultExt};
use snafu::ResultExt;
use crate::error::Result;
use crate::error::{FlowServerSnafu, Result};
use crate::Client;
#[derive(Debug)]
@@ -57,15 +57,10 @@ impl FlowRequester {
let response = client
.handle_create_remove(request)
.await
.map_err(|e| {
.or_else(|e| {
let code = e.code();
let err: crate::error::Error = e.into();
crate::error::Error::FlowServer {
addr,
code,
source: BoxedError::new(err),
location: location!(),
}
Err(BoxedError::new(err)).context(FlowServerSnafu { addr, code })
})?
.into_inner();
Ok(response)
@@ -88,15 +83,10 @@ impl FlowRequester {
let response = client
.handle_mirror_request(requests)
.await
.map_err(|e| {
.or_else(|e| {
let code = e.code();
let err: crate::error::Error = e.into();
crate::error::Error::FlowServer {
addr,
code,
source: BoxedError::new(err),
location: location!(),
}
Err(BoxedError::new(err)).context(FlowServerSnafu { addr, code })
})?
.into_inner();
Ok(response)


@@ -38,8 +38,8 @@ use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use tokio_stream::StreamExt;
use crate::error::{
self, ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
MissingFieldSnafu, Result, ServerSnafu,
self, ConvertFlightDataSnafu, FlightGetSnafu, IllegalDatabaseResponseSnafu,
IllegalFlightMessagesSnafu, MissingFieldSnafu, Result, ServerSnafu,
};
use crate::{metrics, Client, Error};
@@ -103,11 +103,14 @@ impl RegionRequester {
let e: error::Error = e.into();
let code = e.status_code();
let msg = e.to_string();
let error = Error::FlightGet {
tonic_code,
addr: flight_client.addr().to_string(),
source: BoxedError::new(ServerSnafu { code, msg }.build()),
};
let error = ServerSnafu { code, msg }
.fail::<()>()
.map_err(BoxedError::new)
.with_context(|_| FlightGetSnafu {
tonic_code,
addr: flight_client.addr().to_string(),
})
.unwrap_err();
error!(
e; "Failed to do Flight get, addr: {}, code: {}",
flight_client.addr(),


@@ -21,6 +21,8 @@ mod export;
mod helper;
// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
mod database;
mod import;
#[allow(unused)]
mod repl;
@@ -32,6 +34,7 @@ pub use repl::Repl;
use tracing_appender::non_blocking::WorkerGuard;
use self::export::ExportCommand;
use crate::cli::import::ImportCommand;
use crate::error::Result;
use crate::options::GlobalOptions;
use crate::App;
@@ -114,6 +117,7 @@ enum SubCommand {
// Attach(AttachCommand),
Bench(BenchTableMetadataCommand),
Export(ExportCommand),
Import(ImportCommand),
}
impl SubCommand {
@@ -122,6 +126,7 @@ impl SubCommand {
// SubCommand::Attach(cmd) => cmd.build().await,
SubCommand::Bench(cmd) => cmd.build(guard).await,
SubCommand::Export(cmd) => cmd.build(guard).await,
SubCommand::Import(cmd) => cmd.build(guard).await,
}
}
}

src/cmd/src/cli/database.rs (new file, 119 lines)

@@ -0,0 +1,119 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use base64::engine::general_purpose;
use base64::Engine;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use serde_json::Value;
use servers::http::greptime_result_v1::GreptimedbV1Response;
use servers::http::GreptimeQueryOutput;
use snafu::ResultExt;
use crate::error::{HttpQuerySqlSnafu, Result, SerdeJsonSnafu};
pub(crate) struct DatabaseClient {
addr: String,
catalog: String,
auth_header: Option<String>,
}
impl DatabaseClient {
pub fn new(addr: String, catalog: String, auth_basic: Option<String>) -> Self {
let auth_header = if let Some(basic) = auth_basic {
let encoded = general_purpose::STANDARD.encode(basic);
Some(format!("basic {}", encoded))
} else {
None
};
Self {
addr,
catalog,
auth_header,
}
}
pub async fn sql_in_public(&self, sql: &str) -> Result<Option<Vec<Vec<Value>>>> {
self.sql(sql, DEFAULT_SCHEMA_NAME).await
}
/// Execute sql query.
pub async fn sql(&self, sql: &str, schema: &str) -> Result<Option<Vec<Vec<Value>>>> {
let url = format!("http://{}/v1/sql", self.addr);
let params = [
("db", format!("{}-{}", self.catalog, schema)),
("sql", sql.to_string()),
];
let mut request = reqwest::Client::new()
.post(&url)
.form(&params)
.header("Content-Type", "application/x-www-form-urlencoded");
if let Some(ref auth) = self.auth_header {
request = request.header("Authorization", auth);
}
let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
reason: format!("bad url: {}", url),
})?;
let response = response
.error_for_status()
.with_context(|_| HttpQuerySqlSnafu {
reason: format!("query failed: {}", sql),
})?;
let text = response.text().await.with_context(|_| HttpQuerySqlSnafu {
reason: "cannot get response text".to_string(),
})?;
let body = serde_json::from_str::<GreptimedbV1Response>(&text).context(SerdeJsonSnafu)?;
Ok(body.output().first().and_then(|output| match output {
GreptimeQueryOutput::Records(records) => Some(records.rows().clone()),
GreptimeQueryOutput::AffectedRows(_) => None,
}))
}
}
/// Split a database string of the form `catalog-schema` at the first `-`.
pub(crate) fn split_database(database: &str) -> Result<(String, Option<String>)> {
let (catalog, schema) = match database.split_once('-') {
Some((catalog, schema)) => (catalog, schema),
None => (DEFAULT_CATALOG_NAME, database),
};
if schema == "*" {
Ok((catalog.to_string(), None))
} else {
Ok((catalog.to_string(), Some(schema.to_string())))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_split_database() {
let result = split_database("catalog-schema").unwrap();
assert_eq!(result, ("catalog".to_string(), Some("schema".to_string())));
let result = split_database("schema").unwrap();
assert_eq!(result, ("greptime".to_string(), Some("schema".to_string())));
let result = split_database("catalog-*").unwrap();
assert_eq!(result, ("catalog".to_string(), None));
let result = split_database("*").unwrap();
assert_eq!(result, ("greptime".to_string(), None));
}
}
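As a usage note, a hedged sketch (address and database name are made up) of how the export and import tools combine these helpers; it mirrors their construction but is not code from this change.
// Illustration only: resolve the catalog/schema pair, build a client, and run
// one statement in the catalog's `public` schema.
async fn demo() -> Result<Option<Vec<Vec<Value>>>> {
    let (catalog, schema) = split_database("greptime-public")?;
    assert_eq!(schema.as_deref(), Some("public"));
    let client = DatabaseClient::new("127.0.0.1:4000".to_string(), catalog, None);
    client.sql_in_public("SHOW DATABASES").await
}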


@@ -13,30 +13,23 @@
// limitations under the License.
use std::collections::HashSet;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use async_trait::async_trait;
use base64::engine::general_purpose;
use base64::Engine;
use clap::{Parser, ValueEnum};
use client::DEFAULT_SCHEMA_NAME;
use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_telemetry::{debug, error, info};
use serde_json::Value;
use servers::http::greptime_result_v1::GreptimedbV1Response;
use servers::http::GreptimeQueryOutput;
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};
use tokio::fs::File;
use tokio::io::{AsyncWriteExt, BufWriter};
use tokio::sync::Semaphore;
use tokio::time::Instant;
use tracing_appender::non_blocking::WorkerGuard;
use crate::cli::{Instance, Tool};
use crate::error::{
EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, Result, SerdeJsonSnafu,
};
use crate::cli::database::DatabaseClient;
use crate::cli::{database, Instance, Tool};
use crate::error::{EmptyResultSnafu, Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
type TableReference = (String, String, String);
@@ -94,26 +87,21 @@ pub struct ExportCommand {
impl ExportCommand {
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
let (catalog, schema) = split_database(&self.database)?;
let (catalog, schema) = database::split_database(&self.database)?;
let auth_header = if let Some(basic) = &self.auth_basic {
let encoded = general_purpose::STANDARD.encode(basic);
Some(format!("basic {}", encoded))
} else {
None
};
let database_client =
DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
Ok(Instance::new(
Box::new(Export {
addr: self.addr.clone(),
catalog,
schema,
database_client,
output_dir: self.output_dir.clone(),
parallelism: self.export_jobs,
target: self.target.clone(),
start_time: self.start_time.clone(),
end_time: self.end_time.clone(),
auth_header,
}),
guard,
))
@@ -121,78 +109,59 @@ impl ExportCommand {
}
pub struct Export {
addr: String,
catalog: String,
schema: Option<String>,
database_client: DatabaseClient,
output_dir: String,
parallelism: usize,
target: ExportTarget,
start_time: Option<String>,
end_time: Option<String>,
auth_header: Option<String>,
}
impl Export {
/// Execute one single sql query.
async fn sql(&self, sql: &str) -> Result<Option<Vec<Vec<Value>>>> {
let url = format!(
"http://{}/v1/sql?db={}-{}&sql={}",
self.addr,
self.catalog,
self.schema.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME),
sql
);
fn catalog_path(&self) -> PathBuf {
PathBuf::from(&self.output_dir).join(&self.catalog)
}
let mut request = reqwest::Client::new()
.get(&url)
.header("Content-Type", "application/x-www-form-urlencoded");
if let Some(ref auth) = self.auth_header {
request = request.header("Authorization", auth);
}
async fn get_db_names(&self) -> Result<Vec<String>> {
let db_names = self.all_db_names().await?;
let Some(schema) = &self.schema else {
return Ok(db_names);
};
let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
reason: format!("bad url: {}", url),
})?;
let response = response
.error_for_status()
.with_context(|_| HttpQuerySqlSnafu {
reason: format!("query failed: {}", sql),
})?;
let text = response.text().await.with_context(|_| HttpQuerySqlSnafu {
reason: "cannot get response text".to_string(),
})?;
let body = serde_json::from_str::<GreptimedbV1Response>(&text).context(SerdeJsonSnafu)?;
Ok(body.output().first().and_then(|output| match output {
GreptimeQueryOutput::Records(records) => Some(records.rows().clone()),
GreptimeQueryOutput::AffectedRows(_) => None,
}))
// Check if the schema exists
db_names
.into_iter()
.find(|db_name| db_name.to_lowercase() == schema.to_lowercase())
.map(|name| vec![name])
.context(SchemaNotFoundSnafu {
catalog: &self.catalog,
schema,
})
}
/// Iterate over all db names.
///
/// Newbie: `db_name` is catalog + schema.
async fn iter_db_names(&self) -> Result<Vec<(String, String)>> {
if let Some(schema) = &self.schema {
Ok(vec![(self.catalog.clone(), schema.clone())])
} else {
let result = self.sql("SHOW DATABASES").await?;
let Some(records) = result else {
EmptyResultSnafu.fail()?
async fn all_db_names(&self) -> Result<Vec<String>> {
let records = self
.database_client
.sql_in_public("SHOW DATABASES")
.await?
.context(EmptyResultSnafu)?;
let mut result = Vec::with_capacity(records.len());
for value in records {
let Value::String(schema) = &value[0] else {
unreachable!()
};
let mut result = Vec::with_capacity(records.len());
for value in records {
let Value::String(schema) = &value[0] else {
unreachable!()
};
if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
continue;
}
result.push((self.catalog.clone(), schema.clone()));
if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
continue;
}
Ok(result)
if schema == common_catalog::consts::PG_CATALOG_NAME {
continue;
}
result.push(schema.clone());
}
Ok(result)
}
/// Return a list of [`TableReference`] to be exported.
@@ -201,7 +170,11 @@ impl Export {
&self,
catalog: &str,
schema: &str,
) -> Result<(Vec<TableReference>, Vec<TableReference>)> {
) -> Result<(
Vec<TableReference>,
Vec<TableReference>,
Vec<TableReference>,
)> {
// Puts all metric table first
let sql = format!(
"SELECT table_catalog, table_schema, table_name \
@@ -210,15 +183,16 @@ impl Export {
and table_catalog = \'{catalog}\' \
and table_schema = \'{schema}\'"
);
let result = self.sql(&sql).await?;
let Some(records) = result else {
EmptyResultSnafu.fail()?
};
let records = self
.database_client
.sql_in_public(&sql)
.await?
.context(EmptyResultSnafu)?;
let mut metric_physical_tables = HashSet::with_capacity(records.len());
for value in records {
let mut t = Vec::with_capacity(3);
for v in &value {
let serde_json::Value::String(value) = v else {
let Value::String(value) = v else {
unreachable!()
};
t.push(value);
@@ -226,100 +200,142 @@ impl Export {
metric_physical_tables.insert((t[0].clone(), t[1].clone(), t[2].clone()));
}
// TODO: SQL injection hurts
let sql = format!(
"SELECT table_catalog, table_schema, table_name \
"SELECT table_catalog, table_schema, table_name, table_type \
FROM information_schema.tables \
WHERE table_type = \'BASE TABLE\' \
WHERE (table_type = \'BASE TABLE\' OR table_type = \'VIEW\') \
and table_catalog = \'{catalog}\' \
and table_schema = \'{schema}\'",
);
let result = self.sql(&sql).await?;
let Some(records) = result else {
EmptyResultSnafu.fail()?
};
let records = self
.database_client
.sql_in_public(&sql)
.await?
.context(EmptyResultSnafu)?;
debug!("Fetched table list: {:?}", records);
debug!("Fetched table/view list: {:?}", records);
if records.is_empty() {
return Ok((vec![], vec![]));
return Ok((vec![], vec![], vec![]));
}
let mut remaining_tables = Vec::with_capacity(records.len());
let mut views = Vec::new();
for value in records {
let mut t = Vec::with_capacity(3);
let mut t = Vec::with_capacity(4);
for v in &value {
let serde_json::Value::String(value) = v else {
let Value::String(value) = v else {
unreachable!()
};
t.push(value);
}
let table = (t[0].clone(), t[1].clone(), t[2].clone());
let table_type = t[3].as_str();
// Ignores the physical table
if !metric_physical_tables.contains(&table) {
remaining_tables.push(table);
if table_type == "VIEW" {
views.push(table);
} else {
remaining_tables.push(table);
}
}
}
Ok((
metric_physical_tables.into_iter().collect(),
remaining_tables,
views,
))
}
async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
let sql = format!(
r#"SHOW CREATE TABLE "{}"."{}"."{}""#,
catalog, schema, table
);
let result = self.sql(&sql).await?;
let Some(records) = result else {
EmptyResultSnafu.fail()?
async fn show_create(
&self,
show_type: &str,
catalog: &str,
schema: &str,
table: Option<&str>,
) -> Result<String> {
let sql = match table {
Some(table) => format!(
r#"SHOW CREATE {} "{}"."{}"."{}""#,
show_type, catalog, schema, table
),
None => format!(r#"SHOW CREATE {} "{}"."{}""#, show_type, catalog, schema),
};
let Value::String(create_table) = &records[0][1] else {
let records = self
.database_client
.sql_in_public(&sql)
.await?
.context(EmptyResultSnafu)?;
let Value::String(create) = &records[0][1] else {
unreachable!()
};
Ok(format!("{};\n", create_table))
Ok(format!("{};\n", create))
}
async fn export_create_database(&self) -> Result<()> {
let timer = Instant::now();
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
for schema in db_names {
let db_dir = self.catalog_path().join(format!("{schema}/"));
tokio::fs::create_dir_all(&db_dir)
.await
.context(FileIoSnafu)?;
let file = db_dir.join("create_database.sql");
let mut file = File::create(file).await.context(FileIoSnafu)?;
let create_database = self
.show_create("DATABASE", &self.catalog, &schema, None)
.await?;
file.write_all(create_database.as_bytes())
.await
.context(FileIoSnafu)?;
}
let elapsed = timer.elapsed();
info!("Success {db_count} jobs, cost: {elapsed:?}");
Ok(())
}
async fn export_create_table(&self) -> Result<()> {
let timer = Instant::now();
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.iter_db_names().await?;
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_names.len());
for (catalog, schema) in db_names {
for schema in db_names {
let semaphore_moved = semaphore.clone();
tasks.push(async move {
let _permit = semaphore_moved.acquire().await.unwrap();
let (metric_physical_tables, remaining_tables) =
self.get_table_list(&catalog, &schema).await?;
let table_count = metric_physical_tables.len() + remaining_tables.len();
let output_dir = Path::new(&self.output_dir)
.join(&catalog)
.join(format!("{schema}/"));
tokio::fs::create_dir_all(&output_dir)
let (metric_physical_tables, remaining_tables, views) =
self.get_table_list(&self.catalog, &schema).await?;
let table_count =
metric_physical_tables.len() + remaining_tables.len() + views.len();
let db_dir = self.catalog_path().join(format!("{schema}/"));
tokio::fs::create_dir_all(&db_dir)
.await
.context(FileIoSnafu)?;
let output_file = Path::new(&output_dir).join("create_tables.sql");
let mut file = File::create(output_file).await.context(FileIoSnafu)?;
let file = db_dir.join("create_tables.sql");
let mut file = File::create(file).await.context(FileIoSnafu)?;
for (c, s, t) in metric_physical_tables.into_iter().chain(remaining_tables) {
match self.show_create_table(&c, &s, &t).await {
Err(e) => {
error!(e; r#"Failed to export table "{}"."{}"."{}""#, c, s, t)
}
Ok(create_table) => {
file.write_all(create_table.as_bytes())
.await
.context(FileIoSnafu)?;
}
}
let create_table = self.show_create("TABLE", &c, &s, Some(&t)).await?;
file.write_all(create_table.as_bytes())
.await
.context(FileIoSnafu)?;
}
for (c, s, v) in views {
let create_view = self.show_create("VIEW", &c, &s, Some(&v)).await?;
file.write_all(create_view.as_bytes())
.await
.context(FileIoSnafu)?;
}
info!(
"Finished exporting {catalog}.{schema} with {table_count} table schemas to path: {}",
output_dir.to_string_lossy()
"Finished exporting {}.{schema} with {table_count} table schemas to path: {}",
self.catalog,
db_dir.to_string_lossy()
);
Ok::<(), Error>(())
@@ -332,14 +348,14 @@ impl Export {
.filter(|r| match r {
Ok(_) => true,
Err(e) => {
error!(e; "export job failed");
error!(e; "export schema job failed");
false
}
})
.count();
let elapsed = timer.elapsed();
info!("Success {success}/{db_count} jobs, cost: {:?}", elapsed);
info!("Success {success}/{db_count} jobs, cost: {elapsed:?}");
Ok(())
}
@@ -347,17 +363,15 @@ impl Export {
async fn export_database_data(&self) -> Result<()> {
let timer = Instant::now();
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.iter_db_names().await?;
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_names.len());
for (catalog, schema) in db_names {
let mut tasks = Vec::with_capacity(db_count);
for schema in db_names {
let semaphore_moved = semaphore.clone();
tasks.push(async move {
let _permit = semaphore_moved.acquire().await.unwrap();
let output_dir = Path::new(&self.output_dir)
.join(&catalog)
.join(format!("{schema}/"));
tokio::fs::create_dir_all(&output_dir)
let db_dir = self.catalog_path().join(format!("{schema}/"));
tokio::fs::create_dir_all(&db_dir)
.await
.context(FileIoSnafu)?;
@@ -379,30 +393,31 @@ impl Export {
let sql = format!(
r#"COPY DATABASE "{}"."{}" TO '{}' {};"#,
catalog,
self.catalog,
schema,
output_dir.to_str().unwrap(),
db_dir.to_str().unwrap(),
with_options
);
info!("Executing sql: {sql}");
self.sql(&sql).await?;
self.database_client.sql_in_public(&sql).await?;
info!(
"Finished exporting {catalog}.{schema} data into path: {}",
output_dir.to_string_lossy()
"Finished exporting {}.{schema} data into path: {}",
self.catalog,
db_dir.to_string_lossy()
);
// The export copy from sql
let copy_from_file = output_dir.join("copy_from.sql");
let copy_from_file = db_dir.join("copy_from.sql");
let mut writer =
BufWriter::new(File::create(copy_from_file).await.context(FileIoSnafu)?);
let copy_database_from_sql = format!(
r#"COPY DATABASE "{}"."{}" FROM '{}' WITH (FORMAT='parquet');"#,
catalog,
self.catalog,
schema,
output_dir.to_str().unwrap()
db_dir.to_str().unwrap()
);
writer
.write(copy_database_from_sql.as_bytes())
@@ -410,7 +425,7 @@ impl Export {
.context(FileIoSnafu)?;
writer.flush().await.context(FileIoSnafu)?;
info!("Finished exporting {catalog}.{schema} copy_from.sql");
info!("Finished exporting {}.{schema} copy_from.sql", self.catalog);
Ok::<(), Error>(())
})
@@ -429,20 +444,23 @@ impl Export {
.count();
let elapsed = timer.elapsed();
info!("Success {success}/{db_count} jobs, costs: {:?}", elapsed);
info!("Success {success}/{db_count} jobs, costs: {elapsed:?}");
Ok(())
}
}
#[allow(deprecated)]
#[async_trait]
impl Tool for Export {
async fn do_work(&self) -> Result<()> {
match self.target {
ExportTarget::Schema => self.export_create_table().await,
ExportTarget::Schema => {
self.export_create_database().await?;
self.export_create_table().await
}
ExportTarget::Data => self.export_database_data().await,
ExportTarget::All => {
self.export_create_database().await?;
self.export_create_table().await?;
self.export_database_data().await
}
@@ -450,20 +468,6 @@ impl Tool for Export {
}
}
/// Split at `-`.
fn split_database(database: &str) -> Result<(String, Option<String>)> {
let (catalog, schema) = match database.split_once('-') {
Some((catalog, schema)) => (catalog, schema),
None => (DEFAULT_CATALOG_NAME, database),
};
if schema == "*" {
Ok((catalog.to_string(), None))
} else {
Ok((catalog.to_string(), Some(schema.to_string())))
}
}
#[cfg(test)]
mod tests {
use clap::Parser;
@@ -471,26 +475,10 @@ mod tests {
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_telemetry::logging::LoggingOptions;
use crate::cli::export::split_database;
use crate::error::Result as CmdResult;
use crate::options::GlobalOptions;
use crate::{cli, standalone, App};
#[test]
fn test_split_database() {
let result = split_database("catalog-schema").unwrap();
assert_eq!(result, ("catalog".to_string(), Some("schema".to_string())));
let result = split_database("schema").unwrap();
assert_eq!(result, ("greptime".to_string(), Some("schema".to_string())));
let result = split_database("catalog-*").unwrap();
assert_eq!(result, ("catalog".to_string(), None));
let result = split_database("*").unwrap();
assert_eq!(result, ("greptime".to_string(), None));
}
#[tokio::test(flavor = "multi_thread")]
async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
let output_dir = tempfile::tempdir().unwrap();

src/cmd/src/cli/import.rs (new file, 218 lines)

@@ -0,0 +1,218 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::PathBuf;
use std::sync::Arc;
use async_trait::async_trait;
use clap::{Parser, ValueEnum};
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_telemetry::{error, info, warn};
use snafu::{OptionExt, ResultExt};
use tokio::sync::Semaphore;
use tokio::time::Instant;
use tracing_appender::non_blocking::WorkerGuard;
use crate::cli::database::DatabaseClient;
use crate::cli::{database, Instance, Tool};
use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
#[derive(Debug, Default, Clone, ValueEnum)]
enum ImportTarget {
/// Import all table schemas into the database.
Schema,
/// Import all table data into the database.
Data,
/// Import all table schemas and data at once.
#[default]
All,
}
#[derive(Debug, Default, Parser)]
pub struct ImportCommand {
/// Server address to connect
#[clap(long)]
addr: String,
/// Directory of the data. E.g.: /tmp/greptimedb-backup
#[clap(long)]
input_dir: String,
/// The database to import, in `catalog-schema` form; `greptime-*` imports every schema in the catalog.
#[clap(long, default_value = "greptime-*")]
database: String,
/// Parallelism of the import.
#[clap(long, short = 'j', default_value = "1")]
import_jobs: usize,
/// Max retry times for each job.
#[clap(long, default_value = "3")]
max_retry: usize,
/// Things to import
#[clap(long, short = 't', value_enum, default_value = "all")]
target: ImportTarget,
/// The basic authentication for connecting to the server
#[clap(long)]
auth_basic: Option<String>,
}
impl ImportCommand {
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
let (catalog, schema) = database::split_database(&self.database)?;
let database_client =
DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
Ok(Instance::new(
Box::new(Import {
catalog,
schema,
database_client,
input_dir: self.input_dir.clone(),
parallelism: self.import_jobs,
target: self.target.clone(),
}),
guard,
))
}
}
pub struct Import {
catalog: String,
schema: Option<String>,
database_client: DatabaseClient,
input_dir: String,
parallelism: usize,
target: ImportTarget,
}
impl Import {
async fn import_create_table(&self) -> Result<()> {
// Use the default db to create the other dbs
self.do_sql_job("create_database.sql", Some(DEFAULT_SCHEMA_NAME))
.await?;
self.do_sql_job("create_tables.sql", None).await
}
async fn import_database_data(&self) -> Result<()> {
self.do_sql_job("copy_from.sql", None).await
}
async fn do_sql_job(&self, filename: &str, exec_db: Option<&str>) -> Result<()> {
let timer = Instant::now();
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_count);
for schema in db_names {
let semaphore_moved = semaphore.clone();
tasks.push(async move {
let _permit = semaphore_moved.acquire().await.unwrap();
let database_input_dir = self.catalog_path().join(&schema);
let sql_file = database_input_dir.join(filename);
let sql = tokio::fs::read_to_string(sql_file)
.await
.context(FileIoSnafu)?;
if sql.is_empty() {
info!("Empty `{filename}` {database_input_dir:?}");
} else {
let db = exec_db.unwrap_or(&schema);
self.database_client.sql(&sql, db).await?;
info!("Imported `{filename}` for database {schema}");
}
Ok::<(), Error>(())
})
}
let success = futures::future::join_all(tasks)
.await
.into_iter()
.filter(|r| match r {
Ok(_) => true,
Err(e) => {
error!(e; "import {filename} job failed");
false
}
})
.count();
let elapsed = timer.elapsed();
info!("Success {success}/{db_count} `{filename}` jobs, cost: {elapsed:?}");
Ok(())
}
fn catalog_path(&self) -> PathBuf {
PathBuf::from(&self.input_dir).join(&self.catalog)
}
async fn get_db_names(&self) -> Result<Vec<String>> {
let db_names = self.all_db_names().await?;
let Some(schema) = &self.schema else {
return Ok(db_names);
};
// Check if the schema exists
db_names
.into_iter()
.find(|db_name| db_name.to_lowercase() == schema.to_lowercase())
.map(|name| vec![name])
.context(SchemaNotFoundSnafu {
catalog: &self.catalog,
schema,
})
}
// Get all database names in the input directory.
// The directory structure should be like:
// /tmp/greptimedb-backup
// ├── greptime-1
// │ ├── db1
// │ └── db2
async fn all_db_names(&self) -> Result<Vec<String>> {
let mut db_names = vec![];
let path = self.catalog_path();
let mut entries = tokio::fs::read_dir(path).await.context(FileIoSnafu)?;
while let Some(entry) = entries.next_entry().await.context(FileIoSnafu)? {
let path = entry.path();
if path.is_dir() {
let db_name = match path.file_name() {
Some(name) => name.to_string_lossy().to_string(),
None => {
warn!("Failed to get the file name of {:?}", path);
continue;
}
};
db_names.push(db_name);
}
}
Ok(db_names)
}
}
#[async_trait]
impl Tool for Import {
async fn do_work(&self) -> Result<()> {
match self.target {
ImportTarget::Schema => self.import_create_table().await,
ImportTarget::Data => self.import_database_data().await,
ImportTarget::All => {
self.import_create_table().await?;
self.import_database_data().await
}
}
}
}
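As a usage note, a hedged sketch of driving the new subcommand programmatically; the flag values are examples, and `parse_from` comes from the derived `clap::Parser`.
// Illustration only: parse flags the way the CLI would and build the tool.
async fn demo() -> Result<()> {
    use clap::Parser;
    let cmd = ImportCommand::parse_from([
        "import",
        "--addr", "127.0.0.1:4000",
        "--input-dir", "/tmp/greptimedb-backup",
        "--database", "greptime-*",
        "--target", "schema",
    ]);
    // `build` wraps the Import tool defined above in an Instance.
    let _instance = cmd.build(vec![]).await?;
    Ok(())
}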


@@ -31,13 +31,6 @@ pub enum Error {
source: common_meta::error::Error,
},
#[snafu(display("Failed to iter stream"))]
IterStream {
#[snafu(implicit)]
location: Location,
source: common_meta::error::Error,
},
#[snafu(display("Failed to init DDL manager"))]
InitDdlManager {
#[snafu(implicit)]
@@ -237,13 +230,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to start catalog manager"))]
StartCatalogManager {
#[snafu(implicit)]
location: Location,
source: catalog::error::Error,
},
#[snafu(display("Failed to connect to Etcd at {etcd_addr}"))]
ConnectEtcd {
etcd_addr: String,
@@ -253,14 +239,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to connect server at {addr}"))]
ConnectServer {
addr: String,
source: client::error::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to serde json"))]
SerdeJson {
#[snafu(source)]
@@ -278,12 +256,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Expect data from output, but got another thing"))]
NotDataFromOutput {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Empty result from output"))]
EmptyResult {
#[snafu(implicit)]
@@ -346,13 +318,12 @@ pub enum Error {
source: meta_client::error::Error,
},
#[snafu(display("Tonic transport error: {error:?} with msg: {msg:?}"))]
TonicTransport {
#[snafu(display("Cannot find schema {schema} in catalog {catalog}"))]
SchemaNotFound {
catalog: String,
schema: String,
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: tonic::transport::Error,
msg: Option<String>,
},
}
@@ -370,18 +341,16 @@ impl ErrorExt for Error {
Error::BuildMetaServer { source, .. } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::IterStream { source, .. }
| Error::InitMetadata { source, .. }
| Error::InitDdlManager { source, .. } => source.status_code(),
Error::InitMetadata { source, .. } | Error::InitDdlManager { source, .. } => {
source.status_code()
}
Error::ConnectServer { source, .. } => source.status_code(),
Error::MissingConfig { .. }
| Error::LoadLayeredConfig { .. }
| Error::IllegalConfig { .. }
| Error::InvalidReplCommand { .. }
| Error::InitTimezone { .. }
| Error::ConnectEtcd { .. }
| Error::NotDataFromOutput { .. }
| Error::CreateDir { .. }
| Error::EmptyResult { .. } => StatusCode::InvalidArguments,
@@ -399,7 +368,6 @@ impl ErrorExt for Error {
source.status_code()
}
Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
Error::StartCatalogManager { source, .. } => source.status_code(),
Error::SerdeJson { .. } | Error::FileIo { .. } | Error::SpawnThread { .. } => {
StatusCode::Unexpected
@@ -414,7 +382,7 @@ impl ErrorExt for Error {
source.status_code()
}
Error::MetaClientInit { source, .. } => source.status_code(),
Error::TonicTransport { .. } => StatusCode::Internal,
Error::SchemaNotFound { .. } => StatusCode::DatabaseNotFound,
}
}


@@ -141,6 +141,8 @@ pub struct StandaloneOptions {
pub region_engine: Vec<RegionEngineConfig>,
pub export_metrics: ExportMetricsOption,
pub tracing: TracingOptions,
pub init_regions_in_background: bool,
pub init_regions_parallelism: usize,
}
impl Default for StandaloneOptions {
@@ -168,6 +170,8 @@ impl Default for StandaloneOptions {
RegionEngineConfig::File(FileEngineConfig::default()),
],
tracing: TracingOptions::default(),
init_regions_in_background: false,
init_regions_parallelism: 16,
}
}
}
@@ -218,6 +222,9 @@ impl StandaloneOptions {
storage: cloned_opts.storage,
region_engine: cloned_opts.region_engine,
grpc: cloned_opts.grpc,
init_regions_in_background: cloned_opts.init_regions_in_background,
init_regions_parallelism: cloned_opts.init_regions_parallelism,
mode: Mode::Standalone,
..Default::default()
}
}


@@ -81,6 +81,7 @@ fn test_load_datanode_example_config() {
sst_meta_cache_size: ReadableSize::mb(128),
vector_cache_size: ReadableSize::mb(512),
page_cache_size: ReadableSize::mb(512),
selector_result_cache_size: ReadableSize::mb(512),
max_background_jobs: 4,
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
..Default::default()


@@ -1,46 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Invalid full table name: {}", table_name))]
InvalidFullTableName {
table_name: String,
#[snafu(implicit)]
location: Location,
},
}
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::InvalidFullTableName { .. } => StatusCode::Unexpected,
}
}
fn as_any(&self) -> &dyn Any {
self
}
}
pub type Result<T> = std::result::Result<T, Error>;


@@ -15,7 +15,6 @@
use consts::DEFAULT_CATALOG_NAME;
pub mod consts;
pub mod error;
#[inline]
pub fn format_schema_name(catalog: &str, schema: &str) -> String {


@@ -7,6 +7,10 @@ license.workspace = true
[lints]
workspace = true
[features]
default = ["geo"]
geo = ["geohash", "h3o"]
[dependencies]
api.workspace = true
arc-swap = "1.0"
@@ -23,6 +27,8 @@ common-time.workspace = true
common-version.workspace = true
datafusion.workspace = true
datatypes.workspace = true
geohash = { version = "0.13", optional = true }
h3o = { version = "0.6", optional = true }
num = "0.4"
num-traits = "0.2"
once_cell.workspace = true


@@ -116,6 +116,10 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
SystemFunction::register(&function_registry);
TableFunction::register(&function_registry);
// Geo functions
#[cfg(feature = "geo")]
crate::scalars::geo::GeoFunctions::register(&function_registry);
Arc::new(function_registry)
});


@@ -15,6 +15,8 @@
pub mod aggregate;
pub(crate) mod date;
pub mod expression;
#[cfg(feature = "geo")]
pub mod geo;
pub mod matches;
pub mod math;
pub mod numpy;

View File

@@ -0,0 +1,31 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
mod geohash;
mod h3;
use geohash::GeohashFunction;
use h3::H3Function;
use crate::function_registry::FunctionRegistry;
pub(crate) struct GeoFunctions;
impl GeoFunctions {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(GeohashFunction));
registry.register(Arc::new(H3Function));
}
}

View File

@@ -0,0 +1,135 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use common_error::ext::{BoxedError, PlainError};
use common_error::status_code::StatusCode;
use common_query::error::{self, InvalidFuncArgsSnafu, Result};
use common_query::prelude::{Signature, TypeSignature};
use datafusion::logical_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::value::Value;
use datatypes::vectors::{MutableVector, StringVectorBuilder, VectorRef};
use geohash::Coord;
use snafu::{ensure, ResultExt};
use crate::function::{Function, FunctionContext};
/// Function that returns a geohash string for a given geospatial coordinate.
#[derive(Clone, Debug, Default)]
pub struct GeohashFunction;
const NAME: &str = "geohash";
impl Function for GeohashFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::string_datatype())
}
fn signature(&self) -> Signature {
let mut signatures = Vec::new();
for coord_type in &[
ConcreteDataType::float32_datatype(),
ConcreteDataType::float64_datatype(),
] {
for resolution_type in &[
ConcreteDataType::int8_datatype(),
ConcreteDataType::int16_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::uint8_datatype(),
ConcreteDataType::uint16_datatype(),
ConcreteDataType::uint32_datatype(),
ConcreteDataType::uint64_datatype(),
] {
signatures.push(TypeSignature::Exact(vec![
// latitude
coord_type.clone(),
// longitude
coord_type.clone(),
// resolution
resolution_type.clone(),
]));
}
}
Signature::one_of(signatures, Volatility::Stable)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 3,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect 3, provided : {}",
columns.len()
),
}
);
let lat_vec = &columns[0];
let lon_vec = &columns[1];
let resolution_vec = &columns[2];
let size = lat_vec.len();
let mut results = StringVectorBuilder::with_capacity(size);
for i in 0..size {
let lat = lat_vec.get(i).as_f64_lossy();
let lon = lon_vec.get(i).as_f64_lossy();
let r = match resolution_vec.get(i) {
Value::Int8(v) => v as usize,
Value::Int16(v) => v as usize,
Value::Int32(v) => v as usize,
Value::Int64(v) => v as usize,
Value::UInt8(v) => v as usize,
Value::UInt16(v) => v as usize,
Value::UInt32(v) => v as usize,
Value::UInt64(v) => v as usize,
_ => unreachable!(),
};
let result = match (lat, lon) {
(Some(lat), Some(lon)) => {
let coord = Coord { x: lon, y: lat };
let encoded = geohash::encode(coord, r)
.map_err(|e| {
BoxedError::new(PlainError::new(
format!("Geohash error: {}", e),
StatusCode::EngineExecuteQuery,
))
})
.context(error::ExecuteSnafu)?;
Some(encoded)
}
_ => None,
};
results.push(result.as_deref());
}
Ok(results.to_vector())
}
}
impl fmt::Display for GeohashFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", NAME)
}
}
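
For reference, the encoding this function wraps can be exercised directly against the geohash crate; a standalone sketch (not part of the patch) mirroring the lat/lon/resolution handling above:

use geohash::Coord;

fn main() {
    let (lat, lon, resolution) = (37.7749_f64, -122.4194_f64, 9_usize);
    // Note the axis order: x is longitude, y is latitude, as in the eval loop above.
    let coord = Coord { x: lon, y: lat };
    let encoded = geohash::encode(coord, resolution).expect("valid coordinate");
    println!("geohash({lat}, {lon}, {resolution}) = {encoded}");
}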

View File

@@ -0,0 +1,145 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use common_error::ext::{BoxedError, PlainError};
use common_error::status_code::StatusCode;
use common_query::error::{self, InvalidFuncArgsSnafu, Result};
use common_query::prelude::{Signature, TypeSignature};
use datafusion::logical_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::value::Value;
use datatypes::vectors::{MutableVector, StringVectorBuilder, VectorRef};
use h3o::{LatLng, Resolution};
use snafu::{ensure, ResultExt};
use crate::function::{Function, FunctionContext};
/// Function that returns the [h3] encoding string for a given geospatial coordinate.
///
/// [h3]: https://h3geo.org/
#[derive(Clone, Debug, Default)]
pub struct H3Function;
const NAME: &str = "h3";
impl Function for H3Function {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::string_datatype())
}
fn signature(&self) -> Signature {
let mut signatures = Vec::new();
for coord_type in &[
ConcreteDataType::float32_datatype(),
ConcreteDataType::float64_datatype(),
] {
for resolution_type in &[
ConcreteDataType::int8_datatype(),
ConcreteDataType::int16_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::uint8_datatype(),
ConcreteDataType::uint16_datatype(),
ConcreteDataType::uint32_datatype(),
ConcreteDataType::uint64_datatype(),
] {
signatures.push(TypeSignature::Exact(vec![
// latitude
coord_type.clone(),
// longitude
coord_type.clone(),
// resolution
resolution_type.clone(),
]));
}
}
Signature::one_of(signatures, Volatility::Stable)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 3,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect 3, provided : {}",
columns.len()
),
}
);
let lat_vec = &columns[0];
let lon_vec = &columns[1];
let resolution_vec = &columns[2];
let size = lat_vec.len();
let mut results = StringVectorBuilder::with_capacity(size);
for i in 0..size {
let lat = lat_vec.get(i).as_f64_lossy();
let lon = lon_vec.get(i).as_f64_lossy();
let r = match resolution_vec.get(i) {
Value::Int8(v) => v as u8,
Value::Int16(v) => v as u8,
Value::Int32(v) => v as u8,
Value::Int64(v) => v as u8,
Value::UInt8(v) => v,
Value::UInt16(v) => v as u8,
Value::UInt32(v) => v as u8,
Value::UInt64(v) => v as u8,
_ => unreachable!(),
};
let result = match (lat, lon) {
(Some(lat), Some(lon)) => {
let coord = LatLng::new(lat, lon)
.map_err(|e| {
BoxedError::new(PlainError::new(
format!("H3 error: {}", e),
StatusCode::EngineExecuteQuery,
))
})
.context(error::ExecuteSnafu)?;
let r = Resolution::try_from(r)
.map_err(|e| {
BoxedError::new(PlainError::new(
format!("H3 error: {}", e),
StatusCode::EngineExecuteQuery,
))
})
.context(error::ExecuteSnafu)?;
let encoded = coord.to_cell(r).to_string();
Some(encoded)
}
_ => None,
};
results.push(result.as_deref());
}
Ok(results.to_vector())
}
}
impl fmt::Display for H3Function {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", NAME)
}
}
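
The h3o calls used above can likewise be exercised on their own; a standalone sketch (not part of the patch) showing the same latitude/longitude/resolution flow:

use h3o::{LatLng, Resolution};

fn main() {
    let (lat, lon) = (37.7749_f64, -122.4194_f64);
    let coord = LatLng::new(lat, lon).expect("valid coordinate");
    let resolution = Resolution::try_from(8_u8).expect("valid resolution (0..=15)");
    // `to_cell` returns a CellIndex whose Display impl prints the usual hex form.
    println!("h3({lat}, {lon}, 8) = {}", coord.to_cell(resolution));
}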

View File

@@ -64,12 +64,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Invalid column proto: {}", err_msg))]
InvalidColumnProto {
err_msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to create vector"))]
CreateVector {
#[snafu(implicit)]
@@ -137,7 +131,6 @@ impl ErrorExt for Error {
Error::DuplicatedTimestampColumn { .. }
| Error::DuplicatedColumnName { .. }
| Error::MissingTimestampColumn { .. } => StatusCode::InvalidArguments,
Error::InvalidColumnProto { .. } => StatusCode::InvalidArguments,
Error::CreateVector { .. } => StatusCode::InvalidArguments,
Error::MissingField { .. } => StatusCode::InvalidArguments,
Error::InvalidColumnDef { source, .. } => source.status_code(),

View File

@@ -24,7 +24,7 @@ use crate::key::table_info::TableInfoKey;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteKey;
use crate::key::view_info::ViewInfoKey;
use crate::key::MetaKey;
use crate::key::MetadataKey;
/// KvBackend cache invalidator
#[async_trait::async_trait]

View File

@@ -131,7 +131,7 @@ impl AlterLogicalTablesProcedure {
let phy_raw_schemas = future::join_all(alter_region_tasks)
.await
.into_iter()
.map(|res| res.map(|mut res| res.extension.remove(ALTER_PHYSICAL_EXTENSION_KEY)))
.map(|res| res.map(|mut res| res.extensions.remove(ALTER_PHYSICAL_EXTENSION_KEY)))
.collect::<Result<Vec<_>>>()?;
if phy_raw_schemas.is_empty() {

View File

@@ -157,7 +157,7 @@ impl CreateLogicalTablesProcedure {
let phy_raw_schemas = join_all(create_region_tasks)
.await
.into_iter()
.map(|res| res.map(|mut res| res.extension.remove(ALTER_PHYSICAL_EXTENSION_KEY)))
.map(|res| res.map(|mut res| res.extensions.remove(ALTER_PHYSICAL_EXTENSION_KEY)))
.collect::<Result<Vec<_>>>()?;
if phy_raw_schemas.is_empty() {

View File

@@ -15,12 +15,12 @@
use common_catalog::consts::METRIC_ENGINE;
use common_error::ext::BoxedError;
use common_procedure::error::Error as ProcedureError;
use snafu::{ensure, location, OptionExt};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::metric_engine_consts::LOGICAL_TABLE_METADATA_KEY;
use table::metadata::TableId;
use crate::ddl::DetectingRegion;
use crate::error::{Error, Result, TableNotFoundSnafu, UnsupportedSnafu};
use crate::error::{Error, OperateDatanodeSnafu, Result, TableNotFoundSnafu, UnsupportedSnafu};
use crate::key::table_name::TableNameKey;
use crate::key::TableMetadataManagerRef;
use crate::peer::Peer;
@@ -32,11 +32,9 @@ use crate::ClusterId;
pub fn add_peer_context_if_needed(datanode: Peer) -> impl FnOnce(Error) -> Error {
move |err| {
if !err.is_retry_later() {
return Error::OperateDatanode {
location: location!(),
peer: datanode,
source: BoxedError::new(err),
};
return Err::<(), BoxedError>(BoxedError::new(err))
.context(OperateDatanodeSnafu { peer: datanode })
.unwrap_err();
}
err
}
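
The refactor above replaces a hand-built Error::OperateDatanode (with an explicit location!()) by snafu's context selector: wrap the source error in an Err, attach the context, and take the wrapped error back out with unwrap_err. A self-contained sketch of the same idiom against a hypothetical error type (names below are illustrative, not from the codebase):

use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum DemoError {
    #[snafu(display("Failed to operate datanode {}", peer))]
    OperateDatanode { peer: String, source: std::io::Error },
}

fn add_peer_context(peer: String) -> impl FnOnce(std::io::Error) -> DemoError {
    move |err| {
        // Err -> context -> unwrap_err builds the wrapping variant without
        // constructing it (or its implicit location) by hand.
        Err::<(), std::io::Error>(err)
            .context(OperateDatanodeSnafu { peer })
            .unwrap_err()
    }
}

fn main() {
    let io_err = std::io::Error::new(std::io::ErrorKind::Other, "connection reset");
    println!("{}", add_peer_context("datanode-1".to_string())(io_err));
}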

View File

@@ -21,7 +21,7 @@ use common_macro::stack_trace_debug;
use common_wal::options::WalOptions;
use serde_json::error::Error as JsonError;
use snafu::{Location, Snafu};
use store_api::storage::{RegionId, RegionNumber};
use store_api::storage::RegionId;
use table::metadata::TableId;
use crate::peer::Peer;
@@ -49,20 +49,6 @@ pub enum Error {
region_id: RegionId,
},
#[snafu(display("Invalid result with a txn response: {}", err_msg))]
InvalidTxnResult {
err_msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid engine type: {}", engine_type))]
InvalidEngineType {
engine_type: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to connect to Etcd"))]
ConnectEtcd {
#[snafu(source)]
@@ -95,15 +81,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Sequence out of range: {}, start={}, step={}", name, start, step))]
SequenceOutOfRange {
name: String,
start: u64,
step: u64,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Unexpected sequence value: {}", err_msg))]
UnexpectedSequenceValue {
err_msg: String,
@@ -327,13 +304,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Catalog already exists, catalog: {}", catalog))]
CatalogAlreadyExists {
catalog: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Schema already exists, catalog:{}, schema: {}", catalog, schema))]
SchemaAlreadyExists {
catalog: String,
@@ -385,15 +355,8 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to rename table, reason: {}", reason))]
RenameTable {
reason: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid table metadata, err: {}", err_msg))]
InvalidTableMetadata {
#[snafu(display("Invalid metadata, err: {}", err_msg))]
InvalidMetadata {
err_msg: String,
#[snafu(implicit)]
location: Location,
@@ -423,27 +386,6 @@ pub enum Error {
location: Location,
},
#[snafu(display(
"Failed to move region {} in table {}, err: {}",
region,
table_id,
err_msg
))]
MoveRegion {
table_id: TableId,
region: RegionNumber,
err_msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid catalog value"))]
InvalidCatalogValue {
source: common_catalog::error::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("External error"))]
External {
#[snafu(implicit)]
@@ -612,13 +554,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Delimiter not found, key: {}", key))]
DelimiterNotFound {
key: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid prefix: {}, key: {}", prefix, key))]
MismatchPrefix {
prefix: String,
@@ -702,15 +637,12 @@ impl ErrorExt for Error {
| ParseOption { .. }
| RouteInfoCorrupted { .. }
| InvalidProtoMsg { .. }
| InvalidTableMetadata { .. }
| MoveRegion { .. }
| InvalidMetadata { .. }
| Unexpected { .. }
| TableInfoNotFound { .. }
| NextSequence { .. }
| SequenceOutOfRange { .. }
| UnexpectedSequenceValue { .. }
| InvalidHeartbeatResponse { .. }
| InvalidTxnResult { .. }
| EncodeJson { .. }
| DecodeJson { .. }
| PayloadNotExist { .. }
@@ -734,22 +666,17 @@ impl ErrorExt for Error {
| MetadataCorruption { .. }
| StrFromUtf8 { .. } => StatusCode::Unexpected,
SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } | RenameTable { .. } => {
StatusCode::Internal
}
SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,
SchemaAlreadyExists { .. } => StatusCode::DatabaseAlreadyExists,
ProcedureNotFound { .. }
| InvalidViewInfo { .. }
| PrimaryKeyNotFound { .. }
| CatalogAlreadyExists { .. }
| EmptyKey { .. }
| InvalidEngineType { .. }
| AlterLogicalTablesInvalidArguments { .. }
| CreateLogicalTablesInvalidArguments { .. }
| MismatchPrefix { .. }
| DelimiterNotFound { .. }
| TlsConfig { .. } => StatusCode::InvalidArguments,
FlowNotFound { .. } => StatusCode::FlowNotFound,
@@ -767,7 +694,6 @@ impl ErrorExt for Error {
OperateDatanode { source, .. } => source.status_code(),
Table { source, .. } => source.status_code(),
RetryLater { source, .. } => source.status_code(),
InvalidCatalogValue { source, .. } => source.status_code(),
ConvertAlterTableRequest { source, .. } => source.status_code(),
ParseProcedureId { .. }

View File

@@ -211,7 +211,7 @@ lazy_static! {
}
/// The key of metadata.
pub trait MetaKey<'a, T> {
pub trait MetadataKey<'a, T> {
fn to_bytes(&self) -> Vec<u8>;
fn from_bytes(bytes: &'a [u8]) -> Result<T>;
@@ -226,7 +226,7 @@ impl From<Vec<u8>> for BytesAdapter {
}
}
impl<'a> MetaKey<'a, BytesAdapter> for BytesAdapter {
impl<'a> MetadataKey<'a, BytesAdapter> for BytesAdapter {
fn to_bytes(&self) -> Vec<u8> {
self.0.clone()
}
@@ -236,7 +236,7 @@ impl<'a> MetaKey<'a, BytesAdapter> for BytesAdapter {
}
}
pub(crate) trait TableMetaKeyGetTxnOp {
pub(crate) trait MetadataKeyGetTxnOp {
fn build_get_op(
&self,
) -> (
@@ -245,7 +245,7 @@ pub(crate) trait TableMetaKeyGetTxnOp {
);
}
pub trait TableMetaValue {
pub trait MetadataValue {
fn try_from_raw_value(raw_value: &[u8]) -> Result<Self>
where
Self: Sized;
@@ -330,7 +330,7 @@ impl<T: DeserializeOwned + Serialize> Serialize for DeserializedValueWithBytes<T
}
}
impl<'de, T: DeserializeOwned + Serialize + TableMetaValue> Deserialize<'de>
impl<'de, T: DeserializeOwned + Serialize + MetadataValue> Deserialize<'de>
for DeserializedValueWithBytes<T>
{
/// - Deserialize behaviors:
@@ -359,7 +359,7 @@ impl<T: Serialize + DeserializeOwned + Clone> Clone for DeserializedValueWithByt
}
}
impl<T: Serialize + DeserializeOwned + TableMetaValue> DeserializedValueWithBytes<T> {
impl<T: Serialize + DeserializeOwned + MetadataValue> DeserializedValueWithBytes<T> {
/// Returns a struct containing a deserialized value and an original `bytes`.
/// It accepts original bytes of inner.
pub fn from_inner_bytes(bytes: Bytes) -> Result<Self> {
@@ -1156,10 +1156,10 @@ impl TableMetadataManager {
}
#[macro_export]
macro_rules! impl_table_meta_value {
macro_rules! impl_metadata_value {
($($val_ty: ty), *) => {
$(
impl $crate::key::TableMetaValue for $val_ty {
impl $crate::key::MetadataValue for $val_ty {
fn try_from_raw_value(raw_value: &[u8]) -> Result<Self> {
serde_json::from_slice(raw_value).context(SerdeJsonSnafu)
}
@@ -1172,10 +1172,10 @@ macro_rules! impl_table_meta_value {
}
}
macro_rules! impl_meta_key_get_txn_op {
macro_rules! impl_metadata_key_get_txn_op {
($($key: ty), *) => {
$(
impl $crate::key::TableMetaKeyGetTxnOp for $key {
impl $crate::key::MetadataKeyGetTxnOp for $key {
/// Returns a [TxnOp] to retrieve the corresponding value
/// and a filter to retrieve the value from the [TxnOpGetResponseSet]
fn build_get_op(
@@ -1197,7 +1197,7 @@ macro_rules! impl_meta_key_get_txn_op {
}
}
impl_meta_key_get_txn_op! {
impl_metadata_key_get_txn_op! {
TableNameKey<'_>,
TableInfoKey,
ViewInfoKey,
@@ -1206,7 +1206,7 @@ impl_meta_key_get_txn_op! {
}
#[macro_export]
macro_rules! impl_optional_meta_value {
macro_rules! impl_optional_metadata_value {
($($val_ty: ty), *) => {
$(
impl $val_ty {
@@ -1222,7 +1222,7 @@ macro_rules! impl_optional_meta_value {
}
}
impl_table_meta_value! {
impl_metadata_value! {
TableNameValue,
TableInfoValue,
ViewInfoValue,
@@ -1233,7 +1233,7 @@ impl_table_meta_value! {
TableFlowValue
}
impl_optional_meta_value! {
impl_optional_metadata_value! {
CatalogNameValue,
SchemaNameValue
}
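
Because MetaKey is renamed to MetadataKey (and TableMetaValue to MetadataValue) across these files, every key type keeps the same two-method shape. A self-contained sketch of that shape using a simplified local trait (the real trait lives in common_meta::key and returns the crate's Result type):

/// Simplified stand-in for the renamed MetadataKey trait.
trait MetadataKey<'a, T> {
    fn to_bytes(&self) -> Vec<u8>;
    fn from_bytes(bytes: &'a [u8]) -> Result<T, String>;
}

struct DemoKey {
    id: u32,
}

impl<'a> MetadataKey<'a, DemoKey> for DemoKey {
    fn to_bytes(&self) -> Vec<u8> {
        format!("__demo/{}", self.id).into_bytes()
    }

    fn from_bytes(bytes: &'a [u8]) -> Result<DemoKey, String> {
        let key = std::str::from_utf8(bytes).map_err(|e| e.to_string())?;
        let id = key
            .strip_prefix("__demo/")
            .ok_or_else(|| format!("invalid DemoKey '{key}'"))?
            .parse::<u32>()
            .map_err(|e| e.to_string())?;
        Ok(DemoKey { id })
    }
}

fn main() {
    let bytes = DemoKey { id: 42 }.to_bytes();
    assert_eq!(DemoKey::from_bytes(&bytes).unwrap().id, 42);
}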

View File

@@ -20,8 +20,8 @@ use futures::stream::BoxStream;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use crate::error::{self, Error, InvalidTableMetadataSnafu, Result};
use crate::key::{MetaKey, CATALOG_NAME_KEY_PATTERN, CATALOG_NAME_KEY_PREFIX};
use crate::error::{self, Error, InvalidMetadataSnafu, Result};
use crate::key::{MetadataKey, CATALOG_NAME_KEY_PATTERN, CATALOG_NAME_KEY_PREFIX};
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
use crate::rpc::store::RangeRequest;
@@ -56,14 +56,14 @@ impl<'a> CatalogNameKey<'a> {
}
}
impl<'a> MetaKey<'a, CatalogNameKey<'a>> for CatalogNameKey<'_> {
impl<'a> MetadataKey<'a, CatalogNameKey<'a>> for CatalogNameKey<'_> {
fn to_bytes(&self) -> Vec<u8> {
self.to_string().into_bytes()
}
fn from_bytes(bytes: &'a [u8]) -> Result<CatalogNameKey<'a>> {
let key = std::str::from_utf8(bytes).map_err(|e| {
InvalidTableMetadataSnafu {
InvalidMetadataSnafu {
err_msg: format!(
"CatalogNameKey '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -87,7 +87,7 @@ impl<'a> TryFrom<&'a str> for CatalogNameKey<'a> {
fn try_from(s: &'a str) -> Result<Self> {
let captures = CATALOG_NAME_KEY_PATTERN
.captures(s)
.context(InvalidTableMetadataSnafu {
.context(InvalidMetadataSnafu {
err_msg: format!("Illegal CatalogNameKey format: '{s}'"),
})?;

View File

@@ -22,10 +22,10 @@ use snafu::OptionExt;
use store_api::storage::RegionNumber;
use table::metadata::TableId;
use super::MetaKey;
use crate::error::{InvalidTableMetadataSnafu, Result};
use super::MetadataKey;
use crate::error::{InvalidMetadataSnafu, Result};
use crate::key::{
RegionDistribution, TableMetaValue, DATANODE_TABLE_KEY_PATTERN, DATANODE_TABLE_KEY_PREFIX,
MetadataValue, RegionDistribution, DATANODE_TABLE_KEY_PATTERN, DATANODE_TABLE_KEY_PREFIX,
};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
@@ -77,14 +77,14 @@ impl DatanodeTableKey {
}
}
impl<'a> MetaKey<'a, DatanodeTableKey> for DatanodeTableKey {
impl<'a> MetadataKey<'a, DatanodeTableKey> for DatanodeTableKey {
fn to_bytes(&self) -> Vec<u8> {
self.to_string().into_bytes()
}
fn from_bytes(bytes: &[u8]) -> Result<DatanodeTableKey> {
let key = std::str::from_utf8(bytes).map_err(|e| {
InvalidTableMetadataSnafu {
InvalidMetadataSnafu {
err_msg: format!(
"DatanodeTableKey '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -92,12 +92,11 @@ impl<'a> MetaKey<'a, DatanodeTableKey> for DatanodeTableKey {
}
.build()
})?;
let captures =
DATANODE_TABLE_KEY_PATTERN
.captures(key)
.context(InvalidTableMetadataSnafu {
err_msg: format!("Invalid DatanodeTableKey '{key}'"),
})?;
let captures = DATANODE_TABLE_KEY_PATTERN
.captures(key)
.context(InvalidMetadataSnafu {
err_msg: format!("Invalid DatanodeTableKey '{key}'"),
})?;
// Safety: pass the regex check above
let datanode_id = captures[1].parse::<DatanodeId>().unwrap();
let table_id = captures[2].parse::<TableId>().unwrap();

View File

@@ -38,7 +38,7 @@ use crate::key::flow::flow_name::FlowNameManager;
use crate::key::flow::flownode_flow::FlownodeFlowManager;
pub use crate::key::flow::table_flow::{TableFlowManager, TableFlowManagerRef};
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{FlowId, MetaKey};
use crate::key::{FlowId, MetadataKey};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchDeleteRequest;
@@ -66,7 +66,7 @@ impl<T> FlowScoped<T> {
}
}
impl<'a, T: MetaKey<'a, T>> MetaKey<'a, FlowScoped<T>> for FlowScoped<T> {
impl<'a, T: MetadataKey<'a, T>> MetadataKey<'a, FlowScoped<T>> for FlowScoped<T> {
fn to_bytes(&self) -> Vec<u8> {
let prefix = FlowScoped::<T>::PREFIX.as_bytes();
let inner = self.inner.to_bytes();
@@ -295,7 +295,7 @@ mod tests {
inner: Vec<u8>,
}
impl<'a> MetaKey<'a, MockKey> for MockKey {
impl<'a> MetadataKey<'a, MockKey> for MockKey {
fn to_bytes(&self) -> Vec<u8> {
self.inner.clone()
}

View File

@@ -25,7 +25,7 @@ use table::table_name::TableName;
use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId, MetaKey, TableMetaValue};
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::FlownodeId;
@@ -42,7 +42,7 @@ lazy_static! {
/// The layout: `__flow/info/{flow_id}`.
pub struct FlowInfoKey(FlowScoped<FlowInfoKeyInner>);
impl<'a> MetaKey<'a, FlowInfoKey> for FlowInfoKey {
impl<'a> MetadataKey<'a, FlowInfoKey> for FlowInfoKey {
fn to_bytes(&self) -> Vec<u8> {
self.0.to_bytes()
}
@@ -80,14 +80,14 @@ impl FlowInfoKeyInner {
}
}
impl<'a> MetaKey<'a, FlowInfoKeyInner> for FlowInfoKeyInner {
impl<'a> MetadataKey<'a, FlowInfoKeyInner> for FlowInfoKeyInner {
fn to_bytes(&self) -> Vec<u8> {
format!("{FLOW_INFO_KEY_PREFIX}/{}", self.flow_id).into_bytes()
}
fn from_bytes(bytes: &'a [u8]) -> Result<FlowInfoKeyInner> {
let key = std::str::from_utf8(bytes).map_err(|e| {
error::InvalidTableMetadataSnafu {
error::InvalidMetadataSnafu {
err_msg: format!(
"FlowInfoKeyInner '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -98,7 +98,7 @@ impl<'a> MetaKey<'a, FlowInfoKeyInner> for FlowInfoKeyInner {
let captures =
FLOW_INFO_KEY_PATTERN
.captures(key)
.context(error::InvalidTableMetadataSnafu {
.context(error::InvalidMetadataSnafu {
err_msg: format!("Invalid FlowInfoKeyInner '{key}'"),
})?;
// Safety: pass the regex check above

View File

@@ -24,7 +24,7 @@ use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{
BytesAdapter, DeserializedValueWithBytes, FlowId, MetaKey, TableMetaValue, NAME_PATTERN,
BytesAdapter, DeserializedValueWithBytes, FlowId, MetadataKey, MetadataValue, NAME_PATTERN,
};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
@@ -76,7 +76,7 @@ impl<'a> FlowNameKey<'a> {
}
}
impl<'a> MetaKey<'a, FlowNameKey<'a>> for FlowNameKey<'a> {
impl<'a> MetadataKey<'a, FlowNameKey<'a>> for FlowNameKey<'a> {
fn to_bytes(&self) -> Vec<u8> {
self.0.to_bytes()
}
@@ -95,7 +95,7 @@ pub struct FlowNameKeyInner<'a> {
pub flow_name: &'a str,
}
impl<'a> MetaKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
impl<'a> MetadataKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
fn to_bytes(&self) -> Vec<u8> {
format!(
"{FLOW_NAME_KEY_PREFIX}/{}/{}",
@@ -106,7 +106,7 @@ impl<'a> MetaKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
fn from_bytes(bytes: &'a [u8]) -> Result<FlowNameKeyInner> {
let key = std::str::from_utf8(bytes).map_err(|e| {
error::InvalidTableMetadataSnafu {
error::InvalidMetadataSnafu {
err_msg: format!(
"FlowNameKeyInner '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -117,7 +117,7 @@ impl<'a> MetaKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
let captures =
FLOW_NAME_KEY_PATTERN
.captures(key)
.context(error::InvalidTableMetadataSnafu {
.context(error::InvalidMetadataSnafu {
err_msg: format!("Invalid FlowNameKeyInner '{key}'"),
})?;
// Safety: pass the regex check above

View File

@@ -22,7 +22,7 @@ use snafu::OptionExt;
use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetaKey, TableMetaValue};
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::peer::Peer;
@@ -68,7 +68,7 @@ impl FlowRouteKey {
}
}
impl<'a> MetaKey<'a, FlowRouteKey> for FlowRouteKey {
impl<'a> MetadataKey<'a, FlowRouteKey> for FlowRouteKey {
fn to_bytes(&self) -> Vec<u8> {
self.0.to_bytes()
}
@@ -101,7 +101,7 @@ impl FlowRouteKeyInner {
}
}
impl<'a> MetaKey<'a, FlowRouteKeyInner> for FlowRouteKeyInner {
impl<'a> MetadataKey<'a, FlowRouteKeyInner> for FlowRouteKeyInner {
fn to_bytes(&self) -> Vec<u8> {
format!(
"{FLOW_ROUTE_KEY_PREFIX}/{}/{}",
@@ -112,7 +112,7 @@ impl<'a> MetaKey<'a, FlowRouteKeyInner> for FlowRouteKeyInner {
fn from_bytes(bytes: &'a [u8]) -> Result<FlowRouteKeyInner> {
let key = std::str::from_utf8(bytes).map_err(|e| {
error::InvalidTableMetadataSnafu {
error::InvalidMetadataSnafu {
err_msg: format!(
"FlowInfoKeyInner '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -123,7 +123,7 @@ impl<'a> MetaKey<'a, FlowRouteKeyInner> for FlowRouteKeyInner {
let captures =
FLOW_ROUTE_KEY_PATTERN
.captures(key)
.context(error::InvalidTableMetadataSnafu {
.context(error::InvalidMetadataSnafu {
err_msg: format!("Invalid FlowInfoKeyInner '{key}'"),
})?;
// Safety: pass the regex check above
@@ -209,7 +209,7 @@ impl FlowRouteManager {
#[cfg(test)]
mod tests {
use super::FlowRouteKey;
use crate::key::MetaKey;
use crate::key::MetadataKey;
#[test]
fn test_key_serialization() {

View File

@@ -22,7 +22,7 @@ use snafu::OptionExt;
use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetaKey};
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
@@ -44,7 +44,7 @@ const FLOWNODE_FLOW_KEY_PREFIX: &str = "flownode";
/// The layout `__flow/flownode/{flownode_id}/{flow_id}/{partition_id}`
pub struct FlownodeFlowKey(FlowScoped<FlownodeFlowKeyInner>);
impl<'a> MetaKey<'a, FlownodeFlowKey> for FlownodeFlowKey {
impl<'a> MetadataKey<'a, FlownodeFlowKey> for FlownodeFlowKey {
fn to_bytes(&self) -> Vec<u8> {
self.0.to_bytes()
}
@@ -113,7 +113,7 @@ impl FlownodeFlowKeyInner {
}
}
impl<'a> MetaKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner {
impl<'a> MetadataKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner {
fn to_bytes(&self) -> Vec<u8> {
format!(
"{FLOWNODE_FLOW_KEY_PREFIX}/{}/{}/{}",
@@ -124,7 +124,7 @@ impl<'a> MetaKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner {
fn from_bytes(bytes: &'a [u8]) -> Result<FlownodeFlowKeyInner> {
let key = std::str::from_utf8(bytes).map_err(|e| {
error::InvalidTableMetadataSnafu {
error::InvalidMetadataSnafu {
err_msg: format!(
"FlownodeFlowKeyInner '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -135,7 +135,7 @@ impl<'a> MetaKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner {
let captures =
FLOWNODE_FLOW_KEY_PATTERN
.captures(key)
.context(error::InvalidTableMetadataSnafu {
.context(error::InvalidMetadataSnafu {
err_msg: format!("Invalid FlownodeFlowKeyInner '{key}'"),
})?;
// Safety: pass the regex check above
@@ -208,7 +208,7 @@ impl FlownodeFlowManager {
#[cfg(test)]
mod tests {
use crate::key::flow::flownode_flow::FlownodeFlowKey;
use crate::key::MetaKey;
use crate::key::MetadataKey;
#[test]
fn test_key_serialization() {

View File

@@ -23,7 +23,7 @@ use table::metadata::TableId;
use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetaKey, TableMetaValue};
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::peer::Peer;
@@ -56,7 +56,7 @@ struct TableFlowKeyInner {
#[derive(Debug, PartialEq)]
pub struct TableFlowKey(FlowScoped<TableFlowKeyInner>);
impl<'a> MetaKey<'a, TableFlowKey> for TableFlowKey {
impl<'a> MetadataKey<'a, TableFlowKey> for TableFlowKey {
fn to_bytes(&self) -> Vec<u8> {
self.0.to_bytes()
}
@@ -129,7 +129,7 @@ impl TableFlowKeyInner {
}
}
impl<'a> MetaKey<'a, TableFlowKeyInner> for TableFlowKeyInner {
impl<'a> MetadataKey<'a, TableFlowKeyInner> for TableFlowKeyInner {
fn to_bytes(&self) -> Vec<u8> {
format!(
"{TABLE_FLOW_KEY_PREFIX}/{}/{}/{}/{}",
@@ -140,7 +140,7 @@ impl<'a> MetaKey<'a, TableFlowKeyInner> for TableFlowKeyInner {
fn from_bytes(bytes: &'a [u8]) -> Result<TableFlowKeyInner> {
let key = std::str::from_utf8(bytes).map_err(|e| {
error::InvalidTableMetadataSnafu {
error::InvalidMetadataSnafu {
err_msg: format!(
"TableFlowKeyInner '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -151,7 +151,7 @@ impl<'a> MetaKey<'a, TableFlowKeyInner> for TableFlowKeyInner {
let captures =
TABLE_FLOW_KEY_PATTERN
.captures(key)
.context(error::InvalidTableMetadataSnafu {
.context(error::InvalidMetadataSnafu {
err_msg: format!("Invalid TableFlowKeyInner '{key}'"),
})?;
// Safety: pass the regex check above

View File

@@ -23,8 +23,8 @@ use humantime_serde::re::humantime;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use crate::error::{self, Error, InvalidTableMetadataSnafu, ParseOptionSnafu, Result};
use crate::key::{MetaKey, SCHEMA_NAME_KEY_PATTERN, SCHEMA_NAME_KEY_PREFIX};
use crate::error::{self, Error, InvalidMetadataSnafu, ParseOptionSnafu, Result};
use crate::key::{MetadataKey, SCHEMA_NAME_KEY_PATTERN, SCHEMA_NAME_KEY_PREFIX};
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
use crate::rpc::store::RangeRequest;
@@ -89,6 +89,19 @@ impl TryFrom<&HashMap<String, String>> for SchemaNameValue {
}
}
impl From<SchemaNameValue> for HashMap<String, String> {
fn from(value: SchemaNameValue) -> Self {
let mut opts = HashMap::new();
if let Some(ttl) = value.ttl {
opts.insert(
OPT_KEY_TTL.to_string(),
format!("{}", humantime::format_duration(ttl)),
);
}
opts
}
}
impl<'a> SchemaNameKey<'a> {
pub fn new(catalog: &'a str, schema: &'a str) -> Self {
Self { catalog, schema }
@@ -109,14 +122,14 @@ impl Display for SchemaNameKey<'_> {
}
}
impl<'a> MetaKey<'a, SchemaNameKey<'a>> for SchemaNameKey<'_> {
impl<'a> MetadataKey<'a, SchemaNameKey<'a>> for SchemaNameKey<'_> {
fn to_bytes(&self) -> Vec<u8> {
self.to_string().into_bytes()
}
fn from_bytes(bytes: &'a [u8]) -> Result<SchemaNameKey<'a>> {
let key = std::str::from_utf8(bytes).map_err(|e| {
InvalidTableMetadataSnafu {
InvalidMetadataSnafu {
err_msg: format!(
"SchemaNameKey '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -142,7 +155,7 @@ impl<'a> TryFrom<&'a str> for SchemaNameKey<'a> {
fn try_from(s: &'a str) -> Result<Self> {
let captures = SCHEMA_NAME_KEY_PATTERN
.captures(s)
.context(InvalidTableMetadataSnafu {
.context(InvalidMetadataSnafu {
err_msg: format!("Illegal SchemaNameKey format: '{s}'"),
})?;
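
The new From<SchemaNameValue> for HashMap<String, String> impl earlier in this file renders the ttl through humantime; a tiny standalone check of that formatting (not part of the patch):

use std::time::Duration;

fn main() {
    // Mirrors the conversion above: a ttl of 3600 seconds becomes the option value "1h".
    let ttl = Duration::from_secs(3600);
    assert_eq!(humantime::format_duration(ttl).to_string(), "1h");
}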

View File

@@ -23,9 +23,9 @@ use table::table_name::TableName;
use table::table_reference::TableReference;
use super::TABLE_INFO_KEY_PATTERN;
use crate::error::{InvalidTableMetadataSnafu, Result};
use crate::error::{InvalidMetadataSnafu, Result};
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{DeserializedValueWithBytes, MetaKey, TableMetaValue, TABLE_INFO_KEY_PREFIX};
use crate::key::{DeserializedValueWithBytes, MetadataKey, MetadataValue, TABLE_INFO_KEY_PREFIX};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchGetRequest;
@@ -51,14 +51,14 @@ impl Display for TableInfoKey {
}
}
impl<'a> MetaKey<'a, TableInfoKey> for TableInfoKey {
impl<'a> MetadataKey<'a, TableInfoKey> for TableInfoKey {
fn to_bytes(&self) -> Vec<u8> {
self.to_string().into_bytes()
}
fn from_bytes(bytes: &[u8]) -> Result<TableInfoKey> {
let key = std::str::from_utf8(bytes).map_err(|e| {
InvalidTableMetadataSnafu {
InvalidMetadataSnafu {
err_msg: format!(
"TableInfoKey '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -68,7 +68,7 @@ impl<'a> MetaKey<'a, TableInfoKey> for TableInfoKey {
})?;
let captures = TABLE_INFO_KEY_PATTERN
.captures(key)
.context(InvalidTableMetadataSnafu {
.context(InvalidMetadataSnafu {
err_msg: format!("Invalid TableInfoKey '{key}'"),
})?;
// Safety: pass the regex check above

View File

@@ -22,8 +22,8 @@ use snafu::OptionExt;
use table::metadata::TableId;
use table::table_name::TableName;
use super::{MetaKey, TableMetaValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
use crate::error::{Error, InvalidTableMetadataSnafu, Result};
use super::{MetadataKey, MetadataValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
use crate::error::{Error, InvalidMetadataSnafu, Result};
use crate::kv_backend::memory::MemoryKvBackend;
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
@@ -63,14 +63,14 @@ impl Display for TableNameKey<'_> {
}
}
impl<'a> MetaKey<'a, TableNameKey<'a>> for TableNameKey<'_> {
impl<'a> MetadataKey<'a, TableNameKey<'a>> for TableNameKey<'_> {
fn to_bytes(&self) -> Vec<u8> {
self.to_string().into_bytes()
}
fn from_bytes(bytes: &'a [u8]) -> Result<TableNameKey<'a>> {
let key = std::str::from_utf8(bytes).map_err(|e| {
InvalidTableMetadataSnafu {
InvalidMetadataSnafu {
err_msg: format!(
"TableNameKey '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -80,7 +80,7 @@ impl<'a> MetaKey<'a, TableNameKey<'a>> for TableNameKey<'_> {
})?;
let captures = TABLE_NAME_KEY_PATTERN
.captures(key)
.context(InvalidTableMetadataSnafu {
.context(InvalidMetadataSnafu {
err_msg: format!("Invalid TableNameKey '{key}'"),
})?;
let catalog = captures.get(1).unwrap().as_str();
@@ -128,7 +128,7 @@ impl<'a> TryFrom<&'a str> for TableNameKey<'a> {
fn try_from(s: &'a str) -> Result<Self> {
let captures = TABLE_NAME_KEY_PATTERN
.captures(s)
.context(InvalidTableMetadataSnafu {
.context(InvalidMetadataSnafu {
err_msg: format!("Illegal TableNameKey format: '{s}'"),
})?;
// Safety: pass the regex check above

View File

@@ -22,12 +22,12 @@ use store_api::storage::{RegionId, RegionNumber};
use table::metadata::TableId;
use crate::error::{
self, InvalidTableMetadataSnafu, MetadataCorruptionSnafu, Result, SerdeJsonSnafu,
self, InvalidMetadataSnafu, MetadataCorruptionSnafu, Result, SerdeJsonSnafu,
TableRouteNotFoundSnafu, UnexpectedLogicalRouteTableSnafu,
};
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{
DeserializedValueWithBytes, MetaKey, RegionDistribution, TableMetaValue,
DeserializedValueWithBytes, MetadataKey, MetadataValue, RegionDistribution,
TABLE_ROUTE_KEY_PATTERN, TABLE_ROUTE_PREFIX,
};
use crate::kv_backend::txn::Txn;
@@ -199,7 +199,7 @@ impl TableRouteValue {
}
}
impl TableMetaValue for TableRouteValue {
impl MetadataValue for TableRouteValue {
fn try_from_raw_value(raw_value: &[u8]) -> Result<Self> {
let r = serde_json::from_slice::<TableRouteValue>(raw_value);
match r {
@@ -244,14 +244,14 @@ impl LogicalTableRouteValue {
}
}
impl<'a> MetaKey<'a, TableRouteKey> for TableRouteKey {
impl<'a> MetadataKey<'a, TableRouteKey> for TableRouteKey {
fn to_bytes(&self) -> Vec<u8> {
self.to_string().into_bytes()
}
fn from_bytes(bytes: &[u8]) -> Result<TableRouteKey> {
let key = std::str::from_utf8(bytes).map_err(|e| {
InvalidTableMetadataSnafu {
InvalidMetadataSnafu {
err_msg: format!(
"TableRouteKey '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
@@ -259,12 +259,11 @@ impl<'a> MetaKey<'a, TableRouteKey> for TableRouteKey {
}
.build()
})?;
let captures =
TABLE_ROUTE_KEY_PATTERN
.captures(key)
.context(InvalidTableMetadataSnafu {
err_msg: format!("Invalid TableRouteKey '{key}'"),
})?;
let captures = TABLE_ROUTE_KEY_PATTERN
.captures(key)
.context(InvalidMetadataSnafu {
err_msg: format!("Invalid TableRouteKey '{key}'"),
})?;
// Safety: pass the regex check above
let table_id = captures[1].parse::<TableId>().unwrap();
Ok(TableRouteKey { table_id })

View File

@@ -16,7 +16,7 @@ use serde::de::DeserializeOwned;
use serde::Serialize;
use crate::error::Result;
use crate::key::{DeserializedValueWithBytes, TableMetaValue};
use crate::key::{DeserializedValueWithBytes, MetadataValue};
use crate::kv_backend::txn::TxnOpResponse;
use crate::rpc::KeyValue;
@@ -41,7 +41,7 @@ impl TxnOpGetResponseSet {
) -> impl FnMut(&mut TxnOpGetResponseSet) -> Result<Option<DeserializedValueWithBytes<T>>>
where
F: FnMut(&mut TxnOpGetResponseSet) -> Option<Vec<u8>>,
T: Serialize + DeserializeOwned + TableMetaValue,
T: Serialize + DeserializeOwned + MetadataValue,
{
move |set| {
f(set)

View File

@@ -24,7 +24,7 @@ use table::table_name::TableName;
use super::VIEW_INFO_KEY_PATTERN;
use crate::error::{InvalidViewInfoSnafu, Result};
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{DeserializedValueWithBytes, MetaKey, TableMetaValue, VIEW_INFO_KEY_PREFIX};
use crate::key::{DeserializedValueWithBytes, MetadataKey, MetadataValue, VIEW_INFO_KEY_PREFIX};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchGetRequest;
@@ -53,7 +53,7 @@ impl Display for ViewInfoKey {
}
}
impl<'a> MetaKey<'a, ViewInfoKey> for ViewInfoKey {
impl<'a> MetadataKey<'a, ViewInfoKey> for ViewInfoKey {
fn to_bytes(&self) -> Vec<u8> {
self.to_string().into_bytes()
}

View File

@@ -13,7 +13,6 @@
// limitations under the License.
use std::any::Any;
use std::string::FromUtf8Error;
use std::sync::Arc;
use common_error::ext::{BoxedError, ErrorExt};
@@ -141,12 +140,6 @@ pub enum Error {
procedure_id: ProcedureId,
},
#[snafu(display("Corrupted data, error: "))]
CorruptedData {
#[snafu(source)]
error: FromUtf8Error,
},
#[snafu(display("Failed to start the remove_outdated_meta method, error"))]
StartRemoveOutdatedMetaTask {
source: common_runtime::error::Error,
@@ -161,14 +154,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Subprocedure {} failed", subprocedure_id))]
SubprocedureFailed {
subprocedure_id: ProcedureId,
source: Arc<Error>,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to parse segment key: {key}"))]
ParseSegmentKey {
#[snafu(implicit)]
@@ -218,14 +203,11 @@ impl ErrorExt for Error {
StatusCode::InvalidArguments
}
Error::ProcedurePanic { .. }
| Error::CorruptedData { .. }
| Error::ParseSegmentKey { .. }
| Error::Unexpected { .. } => StatusCode::Unexpected,
Error::ProcedureExec { source, .. } => source.status_code(),
Error::StartRemoveOutdatedMetaTask { source, .. }
| Error::StopRemoveOutdatedMetaTask { source, .. } => source.status_code(),
Error::SubprocedureFailed { source, .. } => source.status_code(),
}
}

View File

@@ -19,10 +19,11 @@ use std::time::Duration;
use backon::{BackoffBuilder, ExponentialBuilder};
use common_telemetry::{debug, error, info};
use rand::Rng;
use snafu::ResultExt;
use tokio::time;
use super::rwlock::OwnedKeyRwLockGuard;
use crate::error::{self, ProcedurePanicSnafu, Result};
use crate::error::{self, ProcedurePanicSnafu, Result, RollbackTimesExceededSnafu};
use crate::local::{ManagerContext, ProcedureMeta, ProcedureMetaRef};
use crate::procedure::{Output, StringKey};
use crate::store::{ProcedureMessage, ProcedureStore};
@@ -222,12 +223,12 @@ impl Runner {
if let Some(d) = rollback.next() {
self.wait_on_err(d, rollback_times).await;
} else {
self.meta.set_state(ProcedureState::failed(Arc::new(
Error::RollbackTimesExceeded {
source: error.clone(),
let err = Err::<(), Arc<Error>>(error)
.context(RollbackTimesExceededSnafu {
procedure_id: self.meta.id,
},
)));
})
.unwrap_err();
self.meta.set_state(ProcedureState::failed(Arc::new(err)));
return;
}
}

View File

@@ -127,12 +127,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Not expected to run ExecutionPlan more than once"))]
ExecuteRepeatedly {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("General DataFusion error"))]
GeneralDataFusion {
#[snafu(source)]
@@ -193,12 +187,6 @@ pub enum Error {
source: BoxedError,
},
#[snafu(display("Failed to join thread"))]
ThreadJoin {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to decode logical plan: {source}"))]
DecodePlan {
#[snafu(implicit)]
@@ -289,9 +277,7 @@ impl ErrorExt for Error {
Error::MissingTableMutationHandler { .. }
| Error::MissingProcedureServiceHandler { .. }
| Error::MissingFlowServiceHandler { .. }
| Error::ExecuteRepeatedly { .. }
| Error::ThreadJoin { .. } => StatusCode::Unexpected,
| Error::MissingFlowServiceHandler { .. } => StatusCode::Unexpected,
Error::UnsupportedInputDataType { .. }
| Error::TypeCast { .. }
@@ -327,7 +313,6 @@ pub fn datafusion_status_code<T: ErrorExt + 'static>(
match e {
DataFusionError::Internal(_) => StatusCode::Internal,
DataFusionError::NotImplemented(_) => StatusCode::Unsupported,
DataFusionError::ResourcesExhausted(_) => StatusCode::RuntimeResourcesExhausted,
DataFusionError::Plan(_) => StatusCode::PlanQuery,
DataFusionError::External(e) => {
if let Some(ext) = (*e).downcast_ref::<T>() {

View File

@@ -172,12 +172,13 @@ impl ErrorExt for Error {
Error::DataTypes { .. }
| Error::CreateRecordBatches { .. }
| Error::PollStream { .. }
| Error::Format { .. }
| Error::ToArrowScalar { .. }
| Error::ProjectArrowRecordBatch { .. }
| Error::PhysicalExpr { .. } => StatusCode::Internal,
Error::PollStream { .. } => StatusCode::EngineExecuteQuery,
Error::ArrowCompute { .. } => StatusCode::IllegalState,
Error::ColumnNotExists { .. } => StatusCode::TableColumnNotFound,

View File

@@ -17,7 +17,6 @@ use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datafusion::error::DataFusionError;
use prost::{DecodeError, EncodeError};
use snafu::{Location, Snafu};
@@ -41,14 +40,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Internal error from DataFusion"))]
DFInternal {
#[snafu(source)]
error: DataFusionError,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Internal error"))]
Internal {
#[snafu(implicit)]
@@ -56,12 +47,6 @@ pub enum Error {
source: BoxedError,
},
#[snafu(display("Cannot convert plan doesn't belong to GreptimeDB"))]
UnknownPlan {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to encode DataFusion plan"))]
EncodeDfPlan {
#[snafu(source)]
@@ -84,10 +69,8 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::UnknownPlan { .. } | Error::EncodeRel { .. } | Error::DecodeRel { .. } => {
StatusCode::InvalidArguments
}
Error::DFInternal { .. } | Error::Internal { .. } => StatusCode::Internal,
Error::EncodeRel { .. } | Error::DecodeRel { .. } => StatusCode::InvalidArguments,
Error::Internal { .. } => StatusCode::Internal,
Error::EncodeDfPlan { .. } | Error::DecodeDfPlan { .. } => StatusCode::Unexpected,
}
}

View File

@@ -34,4 +34,4 @@ tracing = "0.1"
tracing-appender = "0.2"
tracing-log = "0.1"
tracing-opentelemetry = "0.22.0"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }

View File

@@ -21,7 +21,7 @@ mod panic_hook;
pub mod tracing_context;
mod tracing_sampler;
pub use logging::{init_default_ut_logging, init_global_logging};
pub use logging::{init_default_ut_logging, init_global_logging, RELOAD_HANDLE};
pub use metric::dump_metrics;
pub use panic_hook::set_panic_hook;
pub use {common_error, tracing};
pub use {common_error, tracing, tracing_subscriber};

View File

@@ -16,7 +16,7 @@
use std::env;
use std::sync::{Arc, Mutex, Once};
use once_cell::sync::Lazy;
use once_cell::sync::{Lazy, OnceCell};
use opentelemetry::{global, KeyValue};
use opentelemetry_otlp::WithExportConfig;
use opentelemetry_sdk::propagation::TraceContextPropagator;
@@ -26,6 +26,7 @@ use serde::{Deserialize, Serialize};
use tracing_appender::non_blocking::WorkerGuard;
use tracing_appender::rolling::{RollingFileAppender, Rotation};
use tracing_log::LogTracer;
use tracing_subscriber::filter::Targets;
use tracing_subscriber::fmt::Layer;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::prelude::*;
@@ -35,15 +36,41 @@ use crate::tracing_sampler::{create_sampler, TracingSampleOptions};
pub const DEFAULT_OTLP_ENDPOINT: &str = "http://localhost:4317";
// Handle for reloading log level
pub static RELOAD_HANDLE: OnceCell<tracing_subscriber::reload::Handle<Targets, Registry>> =
OnceCell::new();
/// The logging options that used to initialize the logger.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct LoggingOptions {
/// The directory to store log files. If not set, logs will be written to stdout.
pub dir: String,
/// The log level that can be one of "trace", "debug", "info", "warn", "error". Default is "info".
pub level: Option<String>,
pub enable_otlp_tracing: bool,
pub otlp_endpoint: Option<String>,
pub tracing_sample_ratio: Option<TracingSampleOptions>,
/// The log format that can be one of "json" or "text". Default is "text".
pub log_format: LogFormat,
/// Whether to append logs to stdout. Default is true.
pub append_stdout: bool,
/// Whether to enable tracing with OTLP. Default is false.
pub enable_otlp_tracing: bool,
/// The endpoint of OTLP. Default is "http://localhost:4317".
pub otlp_endpoint: Option<String>,
/// The tracing sample ratio.
pub tracing_sample_ratio: Option<TracingSampleOptions>,
}
#[derive(Clone, Debug, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum LogFormat {
Json,
Text,
}
impl PartialEq for LoggingOptions {
@@ -64,6 +91,7 @@ impl Default for LoggingOptions {
Self {
dir: "/tmp/greptimedb/logs".to_string(),
level: None,
log_format: LogFormat::Text,
enable_otlp_tracing: false,
otlp_endpoint: None,
tracing_sample_ratio: None,
@@ -128,62 +156,103 @@ pub fn init_global_logging(
let mut guards = vec![];
START.call_once(|| {
let dir = &opts.dir;
let level = &opts.level;
let enable_otlp_tracing = opts.enable_otlp_tracing;
// Enable log compatible layer to convert log record to tracing span.
LogTracer::init().expect("log tracer must be valid");
// stdout log layer.
// Configure the stdout logging layer.
let stdout_logging_layer = if opts.append_stdout {
let (stdout_writer, stdout_guard) = tracing_appender::non_blocking(std::io::stdout());
guards.push(stdout_guard);
let (writer, guard) = tracing_appender::non_blocking(std::io::stdout());
guards.push(guard);
Some(
Layer::new()
.with_writer(stdout_writer)
.with_ansi(atty::is(atty::Stream::Stdout)),
)
if opts.log_format == LogFormat::Json {
Some(
Layer::new()
.json()
.with_writer(writer)
.with_ansi(atty::is(atty::Stream::Stdout))
.boxed(),
)
} else {
Some(
Layer::new()
.with_writer(writer)
.with_ansi(atty::is(atty::Stream::Stdout))
.boxed(),
)
}
} else {
None
};
// file log layer.
let rolling_appender = RollingFileAppender::new(Rotation::HOURLY, dir, app_name);
let (rolling_writer, rolling_writer_guard) =
tracing_appender::non_blocking(rolling_appender);
let file_logging_layer = Layer::new().with_writer(rolling_writer).with_ansi(false);
guards.push(rolling_writer_guard);
// Configure the file logging layer with rolling policy.
let file_logging_layer = if !opts.dir.is_empty() {
let rolling_appender =
RollingFileAppender::new(Rotation::HOURLY, &opts.dir, "greptimedb");
let (writer, guard) = tracing_appender::non_blocking(rolling_appender);
guards.push(guard);
// error file log layer.
let err_rolling_appender =
RollingFileAppender::new(Rotation::HOURLY, dir, format!("{}-{}", app_name, "err"));
let (err_rolling_writer, err_rolling_writer_guard) =
tracing_appender::non_blocking(err_rolling_appender);
let err_file_logging_layer = Layer::new()
.with_writer(err_rolling_writer)
.with_ansi(false);
guards.push(err_rolling_writer_guard);
if opts.log_format == LogFormat::Json {
Some(
Layer::new()
.json()
.with_writer(writer)
.with_ansi(false)
.boxed(),
)
} else {
Some(Layer::new().with_writer(writer).with_ansi(false).boxed())
}
} else {
None
};
// Configure the error file logging layer with rolling policy.
let err_file_logging_layer = if !opts.dir.is_empty() {
let rolling_appender =
RollingFileAppender::new(Rotation::HOURLY, &opts.dir, "greptimedb-err");
let (writer, guard) = tracing_appender::non_blocking(rolling_appender);
guards.push(guard);
if opts.log_format == LogFormat::Json {
Some(
Layer::new()
.json()
.with_writer(writer)
.with_ansi(false)
.with_filter(filter::LevelFilter::ERROR)
.boxed(),
)
} else {
Some(
Layer::new()
.with_writer(writer)
.with_ansi(false)
.with_filter(filter::LevelFilter::ERROR)
.boxed(),
)
}
} else {
None
};
// resolve log level settings from:
// - options from command line or config files
// - environment variable: RUST_LOG
// - default settings
let rust_log_env = std::env::var(EnvFilter::DEFAULT_ENV).ok();
let targets_string = level
let filter = opts
.level
.as_deref()
.or(rust_log_env.as_deref())
.unwrap_or(DEFAULT_LOG_TARGETS);
let filter = targets_string
.or(env::var(EnvFilter::DEFAULT_ENV).ok().as_deref())
.unwrap_or(DEFAULT_LOG_TARGETS)
.parse::<filter::Targets>()
.expect("error parsing log level string");
let sampler = opts
.tracing_sample_ratio
.as_ref()
.map(create_sampler)
.map(Sampler::ParentBased)
.unwrap_or(Sampler::ParentBased(Box::new(Sampler::AlwaysOn)));
let (dyn_filter, reload_handle) = tracing_subscriber::reload::Layer::new(filter.clone());
RELOAD_HANDLE
.set(reload_handle)
.expect("reload handle already set, maybe init_global_logging get called twice?");
// Must enable 'tokio_unstable' cfg to use this feature.
// For example: `RUSTFLAGS="--cfg tokio_unstable" cargo run -F common-telemetry/console -- standalone start`
#[cfg(feature = "tokio-console")]
@@ -204,59 +273,70 @@ pub fn init_global_logging(
None
};
let stdout_logging_layer = stdout_logging_layer.map(|x| x.with_filter(filter.clone()));
let file_logging_layer = file_logging_layer.with_filter(filter);
Registry::default()
.with(dyn_filter)
.with(tokio_console_layer)
.with(stdout_logging_layer)
.with(file_logging_layer)
.with(err_file_logging_layer.with_filter(filter::LevelFilter::ERROR))
.with(err_file_logging_layer)
};
// consume the `tracing_opts`, to avoid "unused" warnings
// consume the `tracing_opts` to avoid "unused" warnings.
let _ = tracing_opts;
#[cfg(not(feature = "tokio-console"))]
let subscriber = Registry::default()
.with(filter)
.with(dyn_filter)
.with(stdout_logging_layer)
.with(file_logging_layer)
.with(err_file_logging_layer.with_filter(filter::LevelFilter::ERROR));
.with(err_file_logging_layer);
if enable_otlp_tracing {
if opts.enable_otlp_tracing {
global::set_text_map_propagator(TraceContextPropagator::new());
// otlp exporter
let sampler = opts
.tracing_sample_ratio
.as_ref()
.map(create_sampler)
.map(Sampler::ParentBased)
.unwrap_or(Sampler::ParentBased(Box::new(Sampler::AlwaysOn)));
let trace_config = opentelemetry_sdk::trace::config()
.with_sampler(sampler)
.with_resource(opentelemetry_sdk::Resource::new(vec![
KeyValue::new(resource::SERVICE_NAME, app_name.to_string()),
KeyValue::new(
resource::SERVICE_INSTANCE_ID,
node_id.unwrap_or("none".to_string()),
),
KeyValue::new(resource::SERVICE_VERSION, env!("CARGO_PKG_VERSION")),
KeyValue::new(resource::PROCESS_PID, std::process::id().to_string()),
]));
let exporter = opentelemetry_otlp::new_exporter().tonic().with_endpoint(
opts.otlp_endpoint
.as_ref()
.map(|e| {
if e.starts_with("http") {
e.to_string()
} else {
format!("http://{}", e)
}
})
.unwrap_or(DEFAULT_OTLP_ENDPOINT.to_string()),
);
let tracer = opentelemetry_otlp::new_pipeline()
.tracing()
.with_exporter(
opentelemetry_otlp::new_exporter().tonic().with_endpoint(
opts.otlp_endpoint
.as_ref()
.map(|e| format!("http://{}", e))
.unwrap_or(DEFAULT_OTLP_ENDPOINT.to_string()),
),
)
.with_trace_config(
opentelemetry_sdk::trace::config()
.with_sampler(sampler)
.with_resource(opentelemetry_sdk::Resource::new(vec![
KeyValue::new(resource::SERVICE_NAME, app_name.to_string()),
KeyValue::new(
resource::SERVICE_INSTANCE_ID,
node_id.unwrap_or("none".to_string()),
),
KeyValue::new(resource::SERVICE_VERSION, env!("CARGO_PKG_VERSION")),
KeyValue::new(resource::PROCESS_PID, std::process::id().to_string()),
])),
)
.with_exporter(exporter)
.with_trace_config(trace_config)
.install_batch(opentelemetry_sdk::runtime::Tokio)
.expect("otlp tracer install failed");
let tracing_layer = Some(tracing_opentelemetry::layer().with_tracer(tracer));
let subscriber = subscriber.with(tracing_layer);
tracing::subscriber::set_global_default(subscriber)
.expect("error setting global tracing subscriber");
tracing::subscriber::set_global_default(
subscriber.with(tracing_opentelemetry::layer().with_tracer(tracer)),
)
.expect("error setting global tracing subscriber");
} else {
tracing::subscriber::set_global_default(subscriber)
.expect("error setting global tracing subscriber");

View File

@@ -46,13 +46,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to parse a string into Interval, raw string: {}", raw))]
ParseInterval {
raw: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Current timestamp overflow"))]
TimestampOverflow {
#[snafu(source)]
@@ -115,7 +108,6 @@ impl ErrorExt for Error {
Error::InvalidDateStr { .. } | Error::ArithmeticOverflow { .. } => {
StatusCode::InvalidArguments
}
Error::ParseInterval { .. } => StatusCode::InvalidArguments,
}
}

View File

@@ -47,7 +47,8 @@ pub enum ObjectStoreConfig {
}
impl ObjectStoreConfig {
pub fn name(&self) -> &'static str {
/// Returns the object storage type name, such as `S3`, `Oss`, etc.
pub fn provider_name(&self) -> &'static str {
match self {
Self::File(_) => "File",
Self::S3(_) => "S3",
@@ -56,6 +57,24 @@ impl ObjectStoreConfig {
Self::Gcs(_) => "Gcs",
}
}
/// Returns the object storage configuration name, falling back to the provider name if it is empty.
pub fn config_name(&self) -> &str {
let name = match self {
// file storage doesn't support name
Self::File(_) => self.provider_name(),
Self::S3(s3) => &s3.name,
Self::Oss(oss) => &oss.name,
Self::Azblob(az) => &az.name,
Self::Gcs(gcs) => &gcs.name,
};
if name.trim().is_empty() {
return self.provider_name();
}
name
}
}
/// Storage engine config
@@ -66,6 +85,7 @@ pub struct StorageConfig {
pub data_home: String,
#[serde(flatten)]
pub store: ObjectStoreConfig,
/// Object storage providers
pub providers: Vec<ObjectStoreConfig>,
}
@@ -95,6 +115,7 @@ pub struct ObjectStorageCacheConfig {
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct S3Config {
pub name: String,
pub bucket: String,
pub root: String,
#[serde(skip_serializing)]
@@ -109,7 +130,8 @@ pub struct S3Config {
impl PartialEq for S3Config {
fn eq(&self, other: &Self) -> bool {
self.bucket == other.bucket
self.name == other.name
&& self.bucket == other.bucket
&& self.root == other.root
&& self.access_key_id.expose_secret() == other.access_key_id.expose_secret()
&& self.secret_access_key.expose_secret() == other.secret_access_key.expose_secret()
@@ -122,6 +144,7 @@ impl PartialEq for S3Config {
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct OssConfig {
pub name: String,
pub bucket: String,
pub root: String,
#[serde(skip_serializing)]
@@ -135,7 +158,8 @@ pub struct OssConfig {
impl PartialEq for OssConfig {
fn eq(&self, other: &Self) -> bool {
self.bucket == other.bucket
self.name == other.name
&& self.bucket == other.bucket
&& self.root == other.root
&& self.access_key_id.expose_secret() == other.access_key_id.expose_secret()
&& self.access_key_secret.expose_secret() == other.access_key_secret.expose_secret()
@@ -147,6 +171,7 @@ impl PartialEq for OssConfig {
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct AzblobConfig {
pub name: String,
pub container: String,
pub root: String,
#[serde(skip_serializing)]
@@ -161,7 +186,8 @@ pub struct AzblobConfig {
impl PartialEq for AzblobConfig {
fn eq(&self, other: &Self) -> bool {
self.container == other.container
self.name == other.name
&& self.container == other.container
&& self.root == other.root
&& self.account_name.expose_secret() == other.account_name.expose_secret()
&& self.account_key.expose_secret() == other.account_key.expose_secret()
@@ -174,6 +200,7 @@ impl PartialEq for AzblobConfig {
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct GcsConfig {
pub name: String,
pub root: String,
pub bucket: String,
pub scope: String,
@@ -188,7 +215,8 @@ pub struct GcsConfig {
impl PartialEq for GcsConfig {
fn eq(&self, other: &Self) -> bool {
self.root == other.root
self.name == other.name
&& self.root == other.root
&& self.bucket == other.bucket
&& self.scope == other.scope
&& self.credential_path.expose_secret() == other.credential_path.expose_secret()
@@ -201,6 +229,7 @@ impl PartialEq for GcsConfig {
impl Default for S3Config {
fn default() -> Self {
Self {
name: String::default(),
bucket: String::default(),
root: String::default(),
access_key_id: SecretString::from(String::default()),
@@ -215,6 +244,7 @@ impl Default for S3Config {
impl Default for OssConfig {
fn default() -> Self {
Self {
name: String::default(),
bucket: String::default(),
root: String::default(),
access_key_id: SecretString::from(String::default()),
@@ -228,6 +258,7 @@ impl Default for OssConfig {
impl Default for AzblobConfig {
fn default() -> Self {
Self {
name: String::default(),
container: String::default(),
root: String::default(),
account_name: SecretString::from(String::default()),
@@ -242,6 +273,7 @@ impl Default for AzblobConfig {
impl Default for GcsConfig {
fn default() -> Self {
Self {
name: String::default(),
root: String::default(),
bucket: String::default(),
scope: String::default(),
@@ -355,6 +387,23 @@ mod tests {
let _parsed: DatanodeOptions = toml::from_str(&toml_string).unwrap();
}
#[test]
fn test_config_name() {
let object_store_config = ObjectStoreConfig::default();
assert_eq!("File", object_store_config.config_name());
let s3_config = ObjectStoreConfig::S3(S3Config::default());
assert_eq!("S3", s3_config.config_name());
assert_eq!("S3", s3_config.provider_name());
let s3_config = ObjectStoreConfig::S3(S3Config {
name: "test".to_string(),
..Default::default()
});
assert_eq!("test", s3_config.config_name());
assert_eq!("S3", s3_config.provider_name());
}
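The fallback exercised by the test above is easiest to see in isolation. Below is a minimal, self-contained sketch of the new `provider_name`/`config_name` pair; only the File and S3 variants are modeled, and the name `backup` is a made-up example:

// Standalone sketch of the config_name fallback added in this diff.
#[derive(Default)]
struct S3Config {
    name: String,
}

enum ObjectStoreConfig {
    File,
    S3(S3Config),
}

impl ObjectStoreConfig {
    // Storage provider type, e.g. "File" or "S3".
    fn provider_name(&self) -> &'static str {
        match self {
            Self::File => "File",
            Self::S3(_) => "S3",
        }
    }

    // User-given configuration name, falling back to the provider name when empty.
    fn config_name(&self) -> &str {
        let name = match self {
            Self::File => self.provider_name(),
            Self::S3(s3) => &s3.name,
        };
        if name.trim().is_empty() {
            self.provider_name()
        } else {
            name
        }
    }
}

fn main() {
    assert_eq!(ObjectStoreConfig::File.config_name(), "File");
    assert_eq!(ObjectStoreConfig::S3(S3Config::default()).config_name(), "S3");
    let named = ObjectStoreConfig::S3(S3Config {
        name: "backup".to_string(),
    });
    assert_eq!(named.config_name(), "backup");
    assert_eq!(named.provider_name(), "S3");
}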
#[test]
fn test_secstr() {
let toml_str = r#"

View File

@@ -273,11 +273,11 @@ impl DatanodeBuilder {
/// Builds [ObjectStoreManager] from [StorageConfig].
pub async fn build_object_store_manager(cfg: &StorageConfig) -> Result<ObjectStoreManagerRef> {
let object_store = store::new_object_store(cfg.store.clone(), &cfg.data_home).await?;
let default_name = cfg.store.name();
let default_name = cfg.store.config_name();
let mut object_store_manager = ObjectStoreManager::new(default_name, object_store);
for store in &cfg.providers {
object_store_manager.add(
store.name(),
store.config_name(),
store::new_object_store(store.clone(), &cfg.data_home).await?,
);
}

View File

@@ -98,13 +98,6 @@ pub enum Error {
location: Location,
},
#[snafu(display(
"Columns and values number mismatch, columns: {}, values: {}",
columns,
values
))]
ColumnValuesNumberMismatch { columns: usize, values: usize },
#[snafu(display("Failed to delete value from table: {}", table_name))]
Delete {
table_name: String,
@@ -156,13 +149,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Runtime resource error"))]
RuntimeResource {
#[snafu(implicit)]
location: Location,
source: common_runtime::error::Error,
},
#[snafu(display("Expect KvBackend but not found"))]
MissingKvBackend {
#[snafu(implicit)]
@@ -172,16 +158,6 @@ pub enum Error {
#[snafu(display("Invalid SQL, error: {}", msg))]
InvalidSql { msg: String },
#[snafu(display("Not support SQL, error: {}", msg))]
NotSupportSql { msg: String },
#[snafu(display("Specified timestamp key or primary key column not found: {}", name))]
KeyColumnNotFound {
name: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Illegal primary keys definition: {}", msg))]
IllegalPrimaryKeysDef {
msg: String,
@@ -210,14 +186,6 @@ pub enum Error {
source: meta_client::error::Error,
},
#[snafu(display(
"Table id provider not found, cannot execute SQL directly on datanode in distributed mode"
))]
TableIdProviderNotFound {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Missing node id in Datanode config"))]
MissingNodeId {
#[snafu(implicit)]
@@ -231,9 +199,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Cannot find requested database: {}-{}", catalog, schema))]
DatabaseNotFound { catalog: String, schema: String },
#[snafu(display(
"No valid default value can be built automatically, column: {}",
column,
@@ -264,12 +229,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Missing WAL dir config"))]
MissingWalDirConfig {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Unexpected, violated: {}", violated))]
Unexpected {
violated: String,
@@ -320,13 +279,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Unsupported gRPC request, kind: {}", kind))]
UnsupportedGrpcRequest {
kind: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Unsupported output type, expected: {}", expected))]
UnsupportedOutput {
expected: String,
@@ -395,20 +347,6 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to setup plugin"))]
SetupPlugin {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to start plugin"))]
StartPlugin {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -430,19 +368,14 @@ impl ErrorExt for Error {
Delete { source, .. } => source.status_code(),
ColumnValuesNumberMismatch { .. }
| InvalidSql { .. }
| NotSupportSql { .. }
| KeyColumnNotFound { .. }
InvalidSql { .. }
| IllegalPrimaryKeysDef { .. }
| MissingTimestampColumn { .. }
| CatalogNotFound { .. }
| SchemaNotFound { .. }
| SchemaExists { .. }
| DatabaseNotFound { .. }
| MissingNodeId { .. }
| ColumnNoneDefaultValue { .. }
| MissingWalDirConfig { .. }
| Catalog { .. }
| MissingRequiredField { .. }
| RegionEngineNotFound { .. }
@@ -456,12 +389,9 @@ impl ErrorExt for Error {
AsyncTaskExecute { source, .. } => source.status_code(),
CreateDir { .. }
| RemoveDir { .. }
| ShutdownInstance { .. }
| DataFusion { .. }
| SetupPlugin { .. }
| StartPlugin { .. } => StatusCode::Internal,
CreateDir { .. } | RemoveDir { .. } | ShutdownInstance { .. } | DataFusion { .. } => {
StatusCode::Internal
}
RegionNotFound { .. } => StatusCode::RegionNotFound,
RegionNotReady { .. } => StatusCode::RegionNotReady,
@@ -472,11 +402,8 @@ impl ErrorExt for Error {
InitBackend { .. } => StatusCode::StorageUnavailable,
OpenLogStore { source, .. } => source.status_code(),
RuntimeResource { .. } => StatusCode::RuntimeResourcesExhausted,
MetaClientInit { source, .. } => source.status_code(),
UnsupportedOutput { .. }
| TableIdProviderNotFound { .. }
| UnsupportedGrpcRequest { .. } => StatusCode::Unsupported,
UnsupportedOutput { .. } => StatusCode::Unsupported,
HandleRegionRequest { source, .. }
| GetRegionMetadata { source, .. }
| HandleBatchOpenRequest { source, .. } => source.status_code(),

View File

@@ -37,7 +37,7 @@ use crate::alive_keeper::RegionAliveKeeper;
use crate::config::DatanodeOptions;
use crate::error::{self, MetaClientInitSnafu, Result};
use crate::event_listener::RegionServerEventReceiver;
use crate::metrics;
use crate::metrics::{self, HEARTBEAT_RECV_COUNT, HEARTBEAT_SENT_COUNT};
use crate::region_server::RegionServer;
pub(crate) mod handler;
@@ -231,10 +231,12 @@ impl HeartbeatTask {
mailbox_message: Some(message),
..Default::default()
};
HEARTBEAT_RECV_COUNT.with_label_values(&["success"]).inc();
Some(req)
}
Err(e) => {
error!(e; "Failed to encode mailbox messages!");
HEARTBEAT_RECV_COUNT.with_label_values(&["error"]).inc();
None
}
}
@@ -304,6 +306,8 @@ impl HeartbeatTask {
error!(e; "Failed to reconnect to metasrv!");
}
}
} else {
HEARTBEAT_SENT_COUNT.inc();
}
}
}
@@ -320,10 +324,12 @@ impl HeartbeatTask {
region_id: stat.region_id.as_u64(),
engine: stat.engine,
role: RegionRole::from(stat.role).into(),
// TODO(jeremy): w/rcus
// TODO(weny): w/rcus
rcus: 0,
wcus: 0,
approximate_bytes: region_server.region_disk_usage(stat.region_id).unwrap_or(0),
// TODO(weny): add extensions
extensions: Default::default(),
})
.collect()
}

View File

@@ -54,4 +54,17 @@ lazy_static! {
&[REGION_ROLE]
)
.unwrap();
/// The number of heartbeats sent by the datanode.
pub static ref HEARTBEAT_SENT_COUNT: IntCounter = register_int_counter!(
"greptime_datanode_heartbeat_send_count",
"datanode heartbeat sent",
)
.unwrap();
/// The number of heartbeats received by datanode, labeled with result type.
pub static ref HEARTBEAT_RECV_COUNT: IntCounterVec = register_int_counter_vec!(
"greptime_datanode_heartbeat_recv_count",
"datanode heartbeat received",
&["result"]
)
.unwrap();
}

View File

@@ -366,10 +366,10 @@ impl RegionServerHandler for RegionServer {
// merge results by sum up affected rows and merge extensions.
let mut affected_rows = 0;
let mut extension = HashMap::new();
let mut extensions = HashMap::new();
for result in results {
affected_rows += result.affected_rows;
extension.extend(result.extension);
extensions.extend(result.extensions);
}
Ok(RegionResponseV1 {
@@ -380,7 +380,7 @@ impl RegionServerHandler for RegionServer {
}),
}),
affected_rows: affected_rows as _,
extension,
extensions,
})
}
}
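Besides the `extension` to `extensions` rename, the merge above relies on `HashMap::extend`, so when two region responses carry the same extension key the later response wins. A small sketch of that behavior, with made-up keys and values:

use std::collections::HashMap;

fn main() {
    // Each element stands in for the `extensions` map of one region response.
    let results: Vec<HashMap<String, Vec<u8>>> = vec![
        HashMap::from([("hint".to_string(), b"a".to_vec())]),
        HashMap::from([
            ("hint".to_string(), b"b".to_vec()),
            ("other".to_string(), b"c".to_vec()),
        ]),
    ];

    // Merge as the handler does: later responses overwrite duplicate keys.
    let mut extensions = HashMap::new();
    for result in results {
        extensions.extend(result);
    }

    assert_eq!(extensions.get("hint"), Some(&b"b".to_vec()));
    assert_eq!(extensions.len(), 2);
}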
@@ -708,7 +708,7 @@ impl RegionServerInner {
.await?;
Ok(RegionResponse {
affected_rows: result.affected_rows,
extension: result.extension,
extensions: result.extensions,
})
}
Err(err) => {
@@ -860,7 +860,7 @@ impl RegionServerInner {
// complains "higher-ranked lifetime error". Rust can't prove some future is legit.
// Possible related issue: https://github.com/rust-lang/rust/issues/102211
//
// The walkaround is to put the async functions in the `common_runtime::spawn_global`. Or like
// The workaround is to put the async functions in the `common_runtime::spawn_global`. Or like
// it here, collect the values first then use later separately.
let regions = self

View File

@@ -15,6 +15,7 @@ workspace = true
arrow.workspace = true
arrow-array.workspace = true
arrow-schema.workspace = true
base64.workspace = true
common-base.workspace = true
common-decimal.workspace = true
common-error.workspace = true
@@ -23,6 +24,7 @@ common-telemetry.workspace = true
common-time.workspace = true
datafusion-common.workspace = true
enum_dispatch = "0.3"
greptime-proto.workspace = true
num = "0.4"
num-traits = "0.2"
ordered-float = { version = "3.0", features = ["serde"] }

View File

@@ -18,6 +18,8 @@ use std::sync::Arc;
use arrow::datatypes::{DataType as ArrowDataType, Field};
use arrow_array::{Array, ListArray};
use base64::engine::general_purpose::URL_SAFE;
use base64::Engine as _;
use common_base::bytes::{Bytes, StringBytes};
use common_decimal::Decimal128;
use common_telemetry::error;
@@ -28,8 +30,10 @@ use common_time::time::Time;
use common_time::timestamp::{TimeUnit, Timestamp};
use common_time::{Duration, Interval, Timezone};
use datafusion_common::ScalarValue;
use greptime_proto::v1::value::ValueData;
pub use ordered_float::OrderedFloat;
use serde::{Deserialize, Serialize, Serializer};
use serde_json::{Number, Value as JsonValue};
use snafu::{ensure, ResultExt};
use crate::error::{self, ConvertArrowArrayToScalarsSnafu, Error, Result, TryFromValueSnafu};
@@ -268,6 +272,23 @@ impl Value {
}
}
/// Cast Value to f64. Return None if it's not castable;
pub fn as_f64_lossy(&self) -> Option<f64> {
match self {
Value::Float32(v) => Some(v.0 as _),
Value::Float64(v) => Some(v.0),
Value::Int8(v) => Some(*v as _),
Value::Int16(v) => Some(*v as _),
Value::Int32(v) => Some(*v as _),
Value::Int64(v) => Some(*v as _),
Value::UInt8(v) => Some(*v as _),
Value::UInt16(v) => Some(*v as _),
Value::UInt32(v) => Some(*v as _),
Value::UInt64(v) => Some(*v as _),
_ => None,
}
}
/// Returns the logical type of the value.
pub fn logical_type_id(&self) -> LogicalTypeId {
match self {
@@ -1347,15 +1368,179 @@ impl<'a> ValueRef<'a> {
}
}
pub fn column_data_to_json(data: ValueData) -> JsonValue {
match data {
ValueData::BinaryValue(b) => JsonValue::String(URL_SAFE.encode(b)),
ValueData::BoolValue(b) => JsonValue::Bool(b),
ValueData::U8Value(i) => JsonValue::Number(i.into()),
ValueData::U16Value(i) => JsonValue::Number(i.into()),
ValueData::U32Value(i) => JsonValue::Number(i.into()),
ValueData::U64Value(i) => JsonValue::Number(i.into()),
ValueData::I8Value(i) => JsonValue::Number(i.into()),
ValueData::I16Value(i) => JsonValue::Number(i.into()),
ValueData::I32Value(i) => JsonValue::Number(i.into()),
ValueData::I64Value(i) => JsonValue::Number(i.into()),
ValueData::F32Value(f) => Number::from_f64(f as f64)
.map(JsonValue::Number)
.unwrap_or(JsonValue::Null),
ValueData::F64Value(f) => Number::from_f64(f)
.map(JsonValue::Number)
.unwrap_or(JsonValue::Null),
ValueData::StringValue(s) => JsonValue::String(s),
ValueData::DateValue(d) => JsonValue::String(Date::from(d).to_string()),
ValueData::DatetimeValue(d) => JsonValue::String(DateTime::from(d).to_string()),
ValueData::TimeSecondValue(d) => JsonValue::String(Time::new_second(d).to_iso8601_string()),
ValueData::TimeMillisecondValue(d) => {
JsonValue::String(Time::new_millisecond(d).to_iso8601_string())
}
ValueData::TimeMicrosecondValue(d) => {
JsonValue::String(Time::new_microsecond(d).to_iso8601_string())
}
ValueData::TimeNanosecondValue(d) => {
JsonValue::String(Time::new_nanosecond(d).to_iso8601_string())
}
ValueData::TimestampMicrosecondValue(d) => {
JsonValue::String(Timestamp::new_microsecond(d).to_iso8601_string())
}
ValueData::TimestampMillisecondValue(d) => {
JsonValue::String(Timestamp::new_millisecond(d).to_iso8601_string())
}
ValueData::TimestampNanosecondValue(d) => {
JsonValue::String(Timestamp::new_nanosecond(d).to_iso8601_string())
}
ValueData::TimestampSecondValue(d) => {
JsonValue::String(Timestamp::new_second(d).to_iso8601_string())
}
ValueData::IntervalYearMonthValue(d) => JsonValue::String(format!("interval year [{}]", d)),
ValueData::IntervalMonthDayNanoValue(d) => JsonValue::String(format!(
"interval month [{}][{}][{}]",
d.months, d.days, d.nanoseconds
)),
ValueData::IntervalDayTimeValue(d) => JsonValue::String(format!("interval day [{}]", d)),
ValueData::Decimal128Value(d) => {
JsonValue::String(format!("decimal128 [{}][{}]", d.hi, d.lo))
}
}
}
#[cfg(test)]
mod tests {
use arrow::datatypes::DataType as ArrowDataType;
use common_time::timezone::set_default_timezone;
use greptime_proto::v1::{Decimal128 as ProtoDecimal128, IntervalMonthDayNano};
use num_traits::Float;
use super::*;
use crate::vectors::ListVectorBuilder;
#[test]
fn test_column_data_to_json() {
assert_eq!(
column_data_to_json(ValueData::BinaryValue(b"hello".to_vec())),
JsonValue::String("aGVsbG8=".to_string())
);
assert_eq!(
column_data_to_json(ValueData::BoolValue(true)),
JsonValue::Bool(true)
);
assert_eq!(
column_data_to_json(ValueData::U8Value(1)),
JsonValue::Number(1.into())
);
assert_eq!(
column_data_to_json(ValueData::U16Value(2)),
JsonValue::Number(2.into())
);
assert_eq!(
column_data_to_json(ValueData::U32Value(3)),
JsonValue::Number(3.into())
);
assert_eq!(
column_data_to_json(ValueData::U64Value(4)),
JsonValue::Number(4.into())
);
assert_eq!(
column_data_to_json(ValueData::I8Value(5)),
JsonValue::Number(5.into())
);
assert_eq!(
column_data_to_json(ValueData::I16Value(6)),
JsonValue::Number(6.into())
);
assert_eq!(
column_data_to_json(ValueData::I32Value(7)),
JsonValue::Number(7.into())
);
assert_eq!(
column_data_to_json(ValueData::I64Value(8)),
JsonValue::Number(8.into())
);
assert_eq!(
column_data_to_json(ValueData::F32Value(9.0)),
JsonValue::Number(Number::from_f64(9.0_f64).unwrap())
);
assert_eq!(
column_data_to_json(ValueData::F64Value(10.0)),
JsonValue::Number(Number::from_f64(10.0_f64).unwrap())
);
assert_eq!(
column_data_to_json(ValueData::StringValue("hello".to_string())),
JsonValue::String("hello".to_string())
);
assert_eq!(
column_data_to_json(ValueData::DateValue(123)),
JsonValue::String("1970-05-04".to_string())
);
assert_eq!(
column_data_to_json(ValueData::DatetimeValue(456)),
JsonValue::String("1970-01-01 00:00:00.456+0000".to_string())
);
assert_eq!(
column_data_to_json(ValueData::TimeSecondValue(789)),
JsonValue::String("00:13:09+0000".to_string())
);
assert_eq!(
column_data_to_json(ValueData::TimeMillisecondValue(789)),
JsonValue::String("00:00:00.789+0000".to_string())
);
assert_eq!(
column_data_to_json(ValueData::TimeMicrosecondValue(789)),
JsonValue::String("00:00:00.000789+0000".to_string())
);
assert_eq!(
column_data_to_json(ValueData::TimestampMillisecondValue(1234567890)),
JsonValue::String("1970-01-15 06:56:07.890+0000".to_string())
);
assert_eq!(
column_data_to_json(ValueData::TimestampNanosecondValue(1234567890123456789)),
JsonValue::String("2009-02-13 23:31:30.123456789+0000".to_string())
);
assert_eq!(
column_data_to_json(ValueData::TimestampSecondValue(1234567890)),
JsonValue::String("2009-02-13 23:31:30+0000".to_string())
);
assert_eq!(
column_data_to_json(ValueData::IntervalYearMonthValue(12)),
JsonValue::String("interval year [12]".to_string())
);
assert_eq!(
column_data_to_json(ValueData::IntervalMonthDayNanoValue(IntervalMonthDayNano {
months: 1,
days: 2,
nanoseconds: 3,
})),
JsonValue::String("interval month [1][2][3]".to_string())
);
assert_eq!(
column_data_to_json(ValueData::IntervalDayTimeValue(4)),
JsonValue::String("interval day [4]".to_string())
);
assert_eq!(
column_data_to_json(ValueData::Decimal128Value(ProtoDecimal128 { hi: 5, lo: 6 })),
JsonValue::String("decimal128 [5][6]".to_string())
);
}
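Two conversions in `column_data_to_json` are easy to gloss over: binary values use the URL-safe base64 alphabet, and non-finite floats become JSON null because `Number::from_f64` rejects NaN and infinity. A small sketch of just those two cases, assuming the `base64` and `serde_json` crates used above:

use base64::engine::general_purpose::URL_SAFE;
use base64::Engine as _;
use serde_json::{Number, Value as JsonValue};

fn main() {
    // Binary column data becomes a URL-safe base64 string.
    assert_eq!(URL_SAFE.encode(b"hello"), "aGVsbG8=");

    // Finite floats map to JSON numbers; NaN (or infinity) maps to null.
    let finite = Number::from_f64(10.0).map(JsonValue::Number).unwrap_or(JsonValue::Null);
    let non_finite = Number::from_f64(f64::NAN).map(JsonValue::Number).unwrap_or(JsonValue::Null);
    assert_eq!(finite, JsonValue::from(10.0));
    assert_eq!(non_finite, JsonValue::Null);
}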
#[test]
fn test_try_from_scalar_value() {
assert_eq!(

View File

@@ -49,13 +49,13 @@ use crate::adapter::table_source::TableSource;
use crate::adapter::util::column_schemas_to_proto;
use crate::adapter::worker::{create_worker, Worker, WorkerHandle};
use crate::compute::ErrCollector;
use crate::df_optimizer::sql_to_flow_plan;
use crate::error::{ExternalSnafu, InternalSnafu, TableNotFoundSnafu, UnexpectedSnafu};
use crate::expr::GlobalId;
use crate::metrics::{
METRIC_FLOW_INPUT_BUF_SIZE, METRIC_FLOW_INSERT_ELAPSED, METRIC_FLOW_RUN_INTERVAL_MS,
};
use crate::repr::{self, DiffRow, Row, BATCH_SIZE};
use crate::transform::sql_to_flow_plan;
mod flownode_impl;
mod parse_expr;

View File

@@ -28,7 +28,7 @@ use super::state::Scheduler;
use crate::compute::state::DataflowState;
use crate::compute::types::{Collection, CollectionBundle, ErrCollector, Toff};
use crate::error::{Error, InvalidQuerySnafu, NotImplementedSnafu};
use crate::expr::{self, GlobalId, LocalId};
use crate::expr::{self, Batch, GlobalId, LocalId};
use crate::plan::{Plan, TypedPlan};
use crate::repr::{self, DiffRow};
@@ -87,9 +87,38 @@ impl<'referred, 'df> Context<'referred, 'df> {
}
impl<'referred, 'df> Context<'referred, 'df> {
/// Interpret and execute plan
/// Like `render_plan` but in Batch Mode
pub fn render_plan_batch(&mut self, plan: TypedPlan) -> Result<CollectionBundle<Batch>, Error> {
match plan.plan {
Plan::Constant { rows } => Ok(self.render_constant_batch(rows)),
Plan::Get { .. } => NotImplementedSnafu {
reason: "Get is still WIP in batchmode",
}
.fail(),
Plan::Let { .. } => NotImplementedSnafu {
reason: "Let is still WIP in batchmode",
}
.fail(),
Plan::Mfp { input, mfp } => self.render_mfp_batch(input, mfp),
Plan::Reduce {
input,
key_val_plan,
reduce_plan,
} => self.render_reduce_batch(input, &key_val_plan, &reduce_plan, &plan.schema.typ),
Plan::Join { .. } => NotImplementedSnafu {
reason: "Join is still WIP",
}
.fail(),
Plan::Union { .. } => NotImplementedSnafu {
reason: "Union is still WIP",
}
.fail(),
}
}
/// Interpret plan to dataflow and prepare them for execution
///
/// return the output of this plan
/// return the output handler of this plan
pub fn render_plan(&mut self, plan: TypedPlan) -> Result<CollectionBundle, Error> {
match plan.plan {
Plan::Constant { rows } => Ok(self.render_constant(rows)),
@@ -112,17 +141,61 @@ impl<'referred, 'df> Context<'referred, 'df> {
}
}
/// render Constant, take all rows that have a timestamp not greater than the current time
/// This function is primarily used for testing
/// Always assume input is sorted by timestamp
pub fn render_constant_batch(&mut self, rows: Vec<DiffRow>) -> CollectionBundle<Batch> {
let (send_port, recv_port) = self.df.make_edge::<_, Toff<Batch>>("constant_batch");
let mut per_time: BTreeMap<repr::Timestamp, Vec<DiffRow>> = Default::default();
for (key, group) in &rows.into_iter().group_by(|(_row, ts, _diff)| *ts) {
per_time.entry(key).or_default().extend(group);
}
let now = self.compute_state.current_time_ref();
// TODO(discord9): better way to schedule future run
let scheduler = self.compute_state.get_scheduler();
let scheduler_inner = scheduler.clone();
let err_collector = self.err_collector.clone();
let subgraph_id =
self.df
.add_subgraph_source("ConstantBatch", send_port, move |_ctx, send_port| {
// find the first timestamp that is greater than now
// use filter_map
let mut after = per_time.split_off(&(*now.borrow() + 1));
// swap
std::mem::swap(&mut per_time, &mut after);
let not_great_than_now = after;
not_great_than_now.into_iter().for_each(|(_ts, rows)| {
err_collector.run(|| {
let rows = rows.into_iter().map(|(row, _ts, _diff)| row).collect();
let batch = Batch::try_from_rows(rows)?;
send_port.give(vec![batch]);
Ok(())
});
});
// schedule the next run
if let Some(next_run_time) = per_time.keys().next().copied() {
scheduler_inner.schedule_at(next_run_time);
}
});
scheduler.set_cur_subgraph(subgraph_id);
CollectionBundle::from_collection(Collection::from_port(recv_port))
}
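The trickiest lines in `render_constant_batch` are the `split_off`/`swap` pair: `split_off(&(now + 1))` leaves the keys not greater than `now` in the original map, and the swap keeps the strictly-future keys buffered for the next scheduled run. A standalone sketch of that split, with made-up timestamps:

use std::collections::BTreeMap;

fn main() {
    let now: i64 = 3;
    // Rows buffered per timestamp (timestamps are made up for illustration).
    let mut per_time: BTreeMap<i64, Vec<&str>> = BTreeMap::from([
        (1, vec!["a"]),
        (3, vec!["b"]),
        (5, vec!["c"]),
    ]);

    // split_off keeps keys < now + 1 (i.e. <= now) in `per_time` and returns the rest.
    let mut after = per_time.split_off(&(now + 1));
    // Swap so `per_time` retains the future rows for the next run,
    // while `due_now` holds everything that should be emitted now.
    std::mem::swap(&mut per_time, &mut after);
    let due_now = after;

    assert_eq!(due_now.keys().copied().collect::<Vec<_>>(), vec![1, 3]);
    assert_eq!(per_time.keys().copied().collect::<Vec<_>>(), vec![5]);
    // The next run is scheduled at the smallest remaining timestamp.
    assert_eq!(per_time.keys().next().copied(), Some(5));
}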
/// render Constant, take all rows that have a timestamp not greater than the current time
///
/// Always assume input is sorted by timestamp
pub fn render_constant(&mut self, rows: Vec<DiffRow>) -> CollectionBundle {
let (send_port, recv_port) = self.df.make_edge::<_, Toff>("constant");
let mut per_time: BTreeMap<repr::Timestamp, Vec<DiffRow>> = rows
.into_iter()
.group_by(|(_row, ts, _diff)| *ts)
.into_iter()
.map(|(k, v)| (k, v.into_iter().collect_vec()))
.collect();
let mut per_time: BTreeMap<repr::Timestamp, Vec<DiffRow>> = Default::default();
for (key, group) in &rows.into_iter().group_by(|(_row, ts, _diff)| *ts) {
per_time.entry(key).or_default().extend(group);
}
let now = self.compute_state.current_time_ref();
// TODO(discord9): better way to schedule future run
let scheduler = self.compute_state.get_scheduler();

View File

@@ -23,12 +23,59 @@ use crate::compute::render::Context;
use crate::compute::state::Scheduler;
use crate::compute::types::{Arranged, Collection, CollectionBundle, ErrCollector, Toff};
use crate::error::{Error, PlanSnafu};
use crate::expr::{EvalError, MapFilterProject, MfpPlan, ScalarExpr};
use crate::expr::{Batch, EvalError, MapFilterProject, MfpPlan, ScalarExpr};
use crate::plan::TypedPlan;
use crate::repr::{self, DiffRow, KeyValDiffRow, Row};
use crate::utils::ArrangeHandler;
impl<'referred, 'df> Context<'referred, 'df> {
/// Like `render_mfp` but in batch mode
pub fn render_mfp_batch(
&mut self,
input: Box<TypedPlan>,
mfp: MapFilterProject,
) -> Result<CollectionBundle<Batch>, Error> {
let input = self.render_plan_batch(*input)?;
let (out_send_port, out_recv_port) = self.df.make_edge::<_, Toff<Batch>>("mfp_batch");
// This closure capture following variables:
let mfp_plan = MfpPlan::create_from(mfp)?;
let err_collector = self.err_collector.clone();
// TODO(discord9): better way to schedule future run
let scheduler = self.compute_state.get_scheduler();
let subgraph = self.df.add_subgraph_in_out(
"mfp_batch",
input.collection.into_inner(),
out_send_port,
move |_ctx, recv, send| {
// mfp only need to passively receive updates from recvs
let src_data = recv.take_inner().into_iter().flat_map(|v| v.into_iter());
let output_batches = src_data
.filter_map(|mut input_batch| {
err_collector.run(|| {
let res_batch = mfp_plan.mfp.eval_batch_into(&mut input_batch)?;
Ok(res_batch)
})
})
.collect_vec();
send.give(output_batches);
},
);
// register current subgraph in scheduler for future scheduling
scheduler.set_cur_subgraph(subgraph);
let bundle =
CollectionBundle::from_collection(Collection::<Batch>::from_port(out_recv_port));
Ok(bundle)
}
/// render MapFilterProject, will only emit the `rows` once. Assume all incoming row's sys time being `now`` and ignore the row's stated sys time
/// TODO(discord9): schedule mfp operator to run when temporal filter need
///

View File

@@ -14,23 +14,247 @@
use std::collections::BTreeMap;
use std::ops::Range;
use std::sync::Arc;
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::DataType;
use datatypes::value::{ListValue, Value};
use datatypes::vectors::NullVector;
use hydroflow::scheduled::graph_ext::GraphExt;
use itertools::Itertools;
use snafu::{ensure, OptionExt, ResultExt};
use crate::compute::render::{Context, SubgraphArg};
use crate::compute::types::{Arranged, Collection, CollectionBundle, ErrCollector, Toff};
use crate::error::{Error, PlanSnafu};
use crate::error::{Error, NotImplementedSnafu, PlanSnafu};
use crate::expr::error::{DataAlreadyExpiredSnafu, DataTypeSnafu, InternalSnafu};
use crate::expr::{EvalError, ScalarExpr};
use crate::expr::{Batch, EvalError, ScalarExpr};
use crate::plan::{AccumulablePlan, AggrWithIndex, KeyValPlan, ReducePlan, TypedPlan};
use crate::repr::{self, DiffRow, KeyValDiffRow, RelationType, Row};
use crate::utils::{ArrangeHandler, ArrangeReader, ArrangeWriter, KeyExpiryManager};
impl<'referred, 'df> Context<'referred, 'df> {
const REDUCE_BATCH: &'static str = "reduce_batch";
/// Like `render_reduce`, but for batch mode, and only barebone implementation
/// no support for distinct aggregation for now
// There is a false positive in using `Vec<ScalarExpr>` as key due to `Value` have `bytes` variant
#[allow(clippy::mutable_key_type)]
pub fn render_reduce_batch(
&mut self,
input: Box<TypedPlan>,
key_val_plan: &KeyValPlan,
reduce_plan: &ReducePlan,
output_type: &RelationType,
) -> Result<CollectionBundle<Batch>, Error> {
let accum_plan = if let ReducePlan::Accumulable(accum_plan) = reduce_plan {
if !accum_plan.distinct_aggrs.is_empty() {
NotImplementedSnafu {
reason: "Distinct aggregation is not supported in batch mode",
}
.fail()?
}
accum_plan.clone()
} else {
NotImplementedSnafu {
reason: "Only accumulable reduce plan is supported in batch mode",
}
.fail()?
};
let input = self.render_plan_batch(*input)?;
// first assembly key&val to separate key and val columns(since this is batch mode)
// Then stream kvs through a reduce operator
// the output is concat from key and val
let output_key_arity = key_val_plan.key_plan.output_arity();
// TODO(discord9): config global expire time from self
let arrange_handler = self.compute_state.new_arrange(None);
if let (Some(time_index), Some(expire_after)) =
(output_type.time_index, self.compute_state.expire_after())
{
let expire_man =
KeyExpiryManager::new(Some(expire_after), Some(ScalarExpr::Column(time_index)));
arrange_handler.write().set_expire_state(expire_man);
}
// reduce need full arrangement to be able to query all keys
let arrange_handler_inner = arrange_handler.clone_full_arrange().context(PlanSnafu {
reason: "No write is expected at this point",
})?;
let key_val_plan = key_val_plan.clone();
let now = self.compute_state.current_time_ref();
let err_collector = self.err_collector.clone();
// TODO(discord9): better way to schedule future run
let scheduler = self.compute_state.get_scheduler();
let (out_send_port, out_recv_port) =
self.df.make_edge::<_, Toff<Batch>>(Self::REDUCE_BATCH);
let subgraph =
self.df.add_subgraph_in_out(
Self::REDUCE_BATCH,
input.collection.into_inner(),
out_send_port,
move |_ctx, recv, send| {
let now = *(now.borrow());
let arrange = arrange_handler_inner.clone();
// mfp only need to passively receive updates from recvs
let src_data = recv
.take_inner()
.into_iter()
.flat_map(|v| v.into_iter())
.collect_vec();
let mut key_to_many_vals = BTreeMap::<Row, Batch>::new();
for batch in src_data {
err_collector.run(|| {
let (key_batch, val_batch) =
batch_split_by_key_val(&batch, &key_val_plan, &err_collector);
ensure!(
key_batch.row_count() == val_batch.row_count(),
InternalSnafu {
reason: format!(
"Key and val batch should have the same row count, found {} and {}",
key_batch.row_count(),
val_batch.row_count()
)
}
);
for row_idx in 0..key_batch.row_count() {
let key_row = key_batch.get_row(row_idx).unwrap();
let val_row = val_batch.slice(row_idx, 1)?;
let val_batch =
key_to_many_vals.entry(Row::new(key_row)).or_default();
val_batch.append_batch(val_row)?;
}
Ok(())
});
}
// write lock the arrange for the rest of the function body
// to prevent weird race conditions
let mut arrange = arrange.write();
let mut all_arrange_updates = Vec::with_capacity(key_to_many_vals.len());
let mut all_output_rows = Vec::with_capacity(key_to_many_vals.len());
for (key, val_batch) in key_to_many_vals {
err_collector.run(|| -> Result<(), _> {
let (accums, _, _) = arrange.get(now, &key).unwrap_or_default();
let accum_list = from_accum_values_to_live_accums(
accums.unpack(),
accum_plan.simple_aggrs.len(),
)?;
let mut accum_output = AccumOutput::new();
for AggrWithIndex {
expr,
input_idx,
output_idx,
} in accum_plan.simple_aggrs.iter()
{
let cur_old_accum = accum_list.get(*output_idx).cloned().unwrap_or_default();
// if batch is empty, input null instead
let cur_input = val_batch.batch().get(*input_idx).cloned().unwrap_or_else(||Arc::new(NullVector::new(val_batch.row_count())));
let (output, new_accum) =
expr.func.eval_batch(cur_old_accum, cur_input, None)?;
accum_output.insert_accum(*output_idx, new_accum);
accum_output.insert_output(*output_idx, output);
}
let (new_accums, res_val_row) = accum_output.into_accum_output()?;
let arrange_update = ((key.clone(), Row::new(new_accums)), now, 1);
all_arrange_updates.push(arrange_update);
let mut key_val = key;
key_val.extend(res_val_row);
all_output_rows.push((key_val, now, 1));
Ok(())
});
}
err_collector.run(|| {
arrange.apply_updates(now, all_arrange_updates)?;
arrange.compact_to(now)
});
// this output part is not supposed to be resource intensive
// (because for every batch there wouldn't usually be as many output row?),
// so we can do some costly operation here
let output_types = all_output_rows.first().map(|(row, _, _)| {
row.iter()
.map(|v| v.data_type())
.collect::<Vec<ConcreteDataType>>()
});
if let Some(output_types) = output_types {
err_collector.run(|| {
let column_cnt = output_types.len();
let row_cnt = all_output_rows.len();
let mut output_builder = output_types
.into_iter()
.map(|t| t.create_mutable_vector(row_cnt))
.collect_vec();
for (row, _, _) in all_output_rows {
for (i, v) in row.into_iter().enumerate() {
output_builder
.get_mut(i)
.context(InternalSnafu{
reason: format!(
"Output builder should have the same length as the row, expected at most {} but got {}",
column_cnt-1,
i
)
})?
.try_push_value_ref(v.as_value_ref())
.context(DataTypeSnafu {
msg: "Failed to push value",
})?;
}
}
let output_columns = output_builder
.into_iter()
.map(|mut b| b.to_vector())
.collect_vec();
let output_batch = Batch::try_new(output_columns, row_cnt)?;
send.give(vec![output_batch]);
Ok(())
});
}
},
);
scheduler.set_cur_subgraph(subgraph);
// by default the key of output arrange
let arranged = BTreeMap::from([(
(0..output_key_arity).map(ScalarExpr::Column).collect_vec(),
Arranged::new(arrange_handler),
)]);
let bundle = CollectionBundle {
collection: Collection::from_port(out_recv_port),
arranged,
};
Ok(bundle)
}
const REDUCE: &'static str = "reduce";
/// render `Plan::Reduce` into executable dataflow
// There is a false positive in using `Vec<ScalarExpr>` as key due to `Value` have `bytes` variant
@@ -151,6 +375,18 @@ impl<'referred, 'df> Context<'referred, 'df> {
}
}
fn from_accum_values_to_live_accums(
accums: Vec<Value>,
len: usize,
) -> Result<Vec<Vec<Value>>, EvalError> {
let accum_ranges = from_val_to_slice_idx(accums.first().cloned(), len)?;
let mut accum_list = vec![];
for range in accum_ranges.iter() {
accum_list.push(accums.get(range.clone()).unwrap_or_default().to_vec());
}
Ok(accum_list)
}
/// All arrange(aka state) used in reduce operator
pub struct ReduceArrange {
/// The output arrange of reduce operator
@@ -160,33 +396,40 @@ pub struct ReduceArrange {
distinct_input: Option<Vec<ArrangeHandler>>,
}
/// split a row into key and val by evaluate the key and val plan
fn split_row_to_key_val(
row: Row,
sys_time: repr::Timestamp,
diff: repr::Diff,
fn batch_split_by_key_val(
batch: &Batch,
key_val_plan: &KeyValPlan,
row_buf: &mut Row,
) -> Result<Option<KeyValDiffRow>, EvalError> {
if let Some(key) = key_val_plan
.key_plan
.evaluate_into(&mut row.inner.clone(), row_buf)?
{
// val_plan is not supported to carry any filter predicate,
let val = key_val_plan
.val_plan
.evaluate_into(&mut row.inner.clone(), row_buf)?
.context(InternalSnafu {
reason: "val_plan should not contain any filter predicate",
})?;
Ok(Some(((key, val), sys_time, diff)))
} else {
Ok(None)
err_collector: &ErrCollector,
) -> (Batch, Batch) {
let row_count = batch.row_count();
let mut key_batch = Batch::empty();
let mut val_batch = Batch::empty();
err_collector.run(|| {
if key_val_plan.key_plan.output_arity() != 0 {
key_batch = key_val_plan.key_plan.eval_batch_into(&mut batch.clone())?;
}
if key_val_plan.val_plan.output_arity() != 0 {
val_batch = key_val_plan.val_plan.eval_batch_into(&mut batch.clone())?;
}
Ok(())
});
// deal with empty key or val
if key_batch.row_count() == 0 && key_batch.column_count() == 0 {
key_batch.set_row_count(row_count);
}
if val_batch.row_count() == 0 && val_batch.column_count() == 0 {
val_batch.set_row_count(row_count);
}
(key_batch, val_batch)
}
/// split a row into key and val by evaluate the key and val plan
fn batch_split_rows_to_key_val(
fn split_rows_to_key_val(
rows: impl IntoIterator<Item = DiffRow>,
key_val_plan: KeyValPlan,
err_collector: ErrCollector,
@@ -235,7 +478,7 @@ fn reduce_subgraph(
send,
}: SubgraphArg,
) {
let key_val = batch_split_rows_to_key_val(data, key_val_plan.clone(), err_collector.clone());
let key_val = split_rows_to_key_val(data, key_val_plan.clone(), err_collector.clone());
// from here for distinct reduce and accum reduce, things are drastically different
// for distinct reduce the arrange store the output,
// but for accum reduce the arrange store the accum state, and output is
@@ -1127,6 +1370,105 @@ mod test {
run_and_check(&mut state, &mut df, 6..7, expected, output);
}
/// Batch Mode Reduce Evaluation
/// SELECT SUM(col) FROM table
///
/// table schema:
/// | name | type |
/// |------|-------|
/// | col | Int64 |
#[test]
fn test_basic_batch_reduce_accum() {
let mut df = Hydroflow::new();
let mut state = DataflowState::default();
let now = state.current_time_ref();
let mut ctx = harness_test_ctx(&mut df, &mut state);
let rows = vec![
(Row::new(vec![1i64.into()]), 1, 1),
(Row::new(vec![2i64.into()]), 2, 1),
(Row::new(vec![3i64.into()]), 3, 1),
(Row::new(vec![1i64.into()]), 4, 1),
(Row::new(vec![2i64.into()]), 5, 1),
(Row::new(vec![3i64.into()]), 6, 1),
];
let input_plan = Plan::Constant { rows: rows.clone() };
let typ = RelationType::new(vec![ColumnType::new_nullable(
ConcreteDataType::int64_datatype(),
)]);
let key_val_plan = KeyValPlan {
key_plan: MapFilterProject::new(1).project([]).unwrap().into_safe(),
val_plan: MapFilterProject::new(1).project([0]).unwrap().into_safe(),
};
let simple_aggrs = vec![AggrWithIndex::new(
AggregateExpr {
func: AggregateFunc::SumInt64,
expr: ScalarExpr::Column(0),
distinct: false,
},
0,
0,
)];
let accum_plan = AccumulablePlan {
full_aggrs: vec![AggregateExpr {
func: AggregateFunc::SumInt64,
expr: ScalarExpr::Column(0),
distinct: false,
}],
simple_aggrs,
distinct_aggrs: vec![],
};
let reduce_plan = ReducePlan::Accumulable(accum_plan);
let bundle = ctx
.render_reduce_batch(
Box::new(input_plan.with_types(typ.into_unnamed())),
&key_val_plan,
&reduce_plan,
&RelationType::empty(),
)
.unwrap();
{
let now_inner = now.clone();
let expected = BTreeMap::<i64, Vec<i64>>::from([
(1, vec![1i64]),
(2, vec![3i64]),
(3, vec![6i64]),
(4, vec![7i64]),
(5, vec![9i64]),
(6, vec![12i64]),
]);
let collection = bundle.collection;
ctx.df
.add_subgraph_sink("test_sink", collection.into_inner(), move |_ctx, recv| {
let now = *now_inner.borrow();
let data = recv.take_inner();
let res = data.into_iter().flat_map(|v| v.into_iter()).collect_vec();
if let Some(expected) = expected.get(&now) {
let batch = expected.iter().map(|v| Value::from(*v)).collect_vec();
let batch = Batch::try_from_rows(vec![batch.into()]).unwrap();
assert_eq!(res.first(), Some(&batch));
}
});
drop(ctx);
for now in 1..7 {
state.set_current_ts(now);
state.run_available_with_schedule(&mut df);
if !state.get_err_collector().is_empty() {
panic!(
"Errors occur: {:?}",
state.get_err_collector().get_all_blocking()
)
}
}
}
}
/// SELECT SUM(col) FROM table
///
/// table schema:

Some files were not shown because too many files have changed in this diff.