Compare commits

...

35 Commits

Author SHA1 Message Date
Ruihang Xia
1bd53567b4 try to run on self-hosted runner
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-13 16:01:50 +08:00
Weny Xu
803940cfa4 feat: enable azblob tests (#1765)
* feat: enable azblob tests

* fix: add missing arg
2023-06-13 07:44:57 +00:00
Weny Xu
420ae054b3 chore: add debug log for heartbeat (#1770) 2023-06-13 07:43:26 +00:00
Lei, HUANG
0f1e061f24 fix: compile issue on develop and workaround to fix failing tests cau… (#1771)
* fix: compile issue on develop and workaround to fix failing tests caused by logstore file lock

* Apply suggestions from code review

Co-authored-by: JeremyHi <jiachun_feng@proton.me>

---------

Co-authored-by: JeremyHi <jiachun_feng@proton.me>
2023-06-13 07:30:16 +00:00
Lei, HUANG
7961de25ad feat: persist compaction time window (#1757)
* feat: persist compaction time window

* refactor: remove useless compaction window fields

* chore: revert some useless change

* fix: some CR comments

* fix: comment out unstable sqlness test

* revert commented sqlness
2023-06-13 10:15:42 +08:00
Lei, HUANG
f7d98e533b chore: fix compaction caused race condition (#1759)
* fix: set max_files_in_l0 in unit tests to avoid compaction

* refactor: pass while EngineConfig

* fix: comment out unstable sqlness test

* revert commented sqlness
2023-06-12 11:19:42 +00:00
Ruihang Xia
b540d640cf fix: unstable order with union operation (#1763)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-12 18:16:24 +08:00
Eugene Tolbakov
51a4d660b7 feat(to_unixtime): add timestamp types as arguments (#1632)
* feat(to_unixtime): add timestamp types as arguments

* feat(to_unixtime): change the return type

* feat(to_unixtime): address code review issues

* feat(to_unixtime): fix fmt issue
2023-06-12 17:21:49 +08:00
Ruihang Xia
1b2381502e fix: bring EnforceSorting rule forward (#1754)
* fix: bring EnforceSorting rule forward

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove duplicated rules

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* wrap remove logic into a method

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-12 07:29:08 +00:00
Yingwen
0e937be3f5 fix(storage): Use region_write_buffer_size as default value (#1760) 2023-06-12 15:05:17 +08:00
Weny Xu
564c183607 chore: make MetaKvBackend public (#1761) 2023-06-12 14:13:26 +08:00
Ruihang Xia
8c78368374 refactor: replace #[snafu(backtrace)] with Location (#1753)
* remove snafu backtrace

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-12 11:55:33 +08:00
Lei, HUANG
67c16dd631 feat: optimize some parquet writer parameter (#1758) 2023-06-12 11:46:45 +08:00
Lei, HUANG
ddcee052b2 fix: order by optimization (#1748)
* add some debug log

* fix: use lazy parquet reader in MitoTable::scan_to_stream to avoid IO in plan stage

* fix: unit tests

* fix: order-by optimization

* add some tests

* fix: move metric names to metrics.rs

* fix: some cr comments
2023-06-12 11:45:43 +08:00
王听正
7efcf868d5 refactor: Remove MySQL related options from Datanode (#1756)
* refactor: Remove MySQL related options from Datanode

remove mysql_addr and mysql_runtime_size in datanode.rs, remove command line argument mysql_addr in cmd/src/datanode.rs

#1739

* feat: remove --mysql-addr from command line

in pre commit, sqlness can not find --mysql-addrr, because we remove it

issue#1739

* refactor: remove --mysql-addr from command line

in pre commit, sqlness can not find --mysql-addrr, because we remove it

issue#1739
2023-06-12 11:00:24 +08:00
dennis zhuang
f08f726bec test: s3 manifest (#1755)
* feat: change default manifest options

* test: s3 manifest

* feat: revert checkpoint_margin to 10

* Update src/object-store/src/test_util.rs

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

---------

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2023-06-09 10:28:41 +00:00
Ning Sun
7437820bdc ci: correct data type for input and event check (#1752) 2023-06-09 13:59:56 +08:00
Lei, HUANG
910c950717 fix: jemalloc error does not implement Error (#1747) 2023-06-09 04:00:50 +00:00
Zou Wei
f91cd250f8 feat:make version() show greptime info. (#1749)
* feat:impl get_version() to return greptime info.

* fix: refactor test case.
2023-06-09 11:38:52 +08:00
Yingwen
115d9eea8d chore: Log version and arguments (#1744) 2023-06-09 11:38:08 +08:00
Ning Sun
bc8f236806 ci: fix using env in job.if context (#1751) 2023-06-09 11:28:29 +08:00
Yiran
fdbda51c25 chore: update document links in README.md (#1745) 2023-06-09 10:05:24 +08:00
Ning Sun
e184826353 ci: allow triggering nightly release manually (#1746)
ci: allow triggering nightly manually
2023-06-09 10:04:44 +08:00
Yingwen
5b8e54e60e feat: Add HTTP API for cpu profiling (#1694)
* chore: print source error in mem-prof

* feat(common-pprof): add pprof crate

* feat(servers): Add pprof handler to router

refactor the mem_prof handler to avoid checking feature while
registering router

* feat(servers): pprof handler support different output type

* docs(common-pprof): Add readme

* feat(common-pprof): Build guard using code in pprof-rs's example

* feat(common-pprof): use prost

* feat: don't add timeout to perf api

* feat: add feature pprof

* feat: update readme

* test: fix tests

* feat: close region in TestBase

* feat(pprof): addres comments
2023-06-07 15:25:16 +08:00
Lei, HUANG
8cda1635cc feat: make jemalloc the default allocator (#1733)
* feat: add jemalloc metrics

* fix: dep format
2023-06-06 12:11:22 +00:00
Lei, HUANG
f63ddb57c3 fix: parquet time range predicate panic (#1735)
fix: parquet reader should use store schema to build time range predicate
2023-06-06 19:11:45 +08:00
fys
d2a8fd9890 feat: add route admin api in metasrv (#1734)
* feat: add route admin api in metasrv

* fix: add license
2023-06-06 18:00:02 +08:00
LFC
91026a6820 chore: clean up some of my todos (#1723)
* chore: clean up some of my todos

* fix: ci
2023-06-06 17:25:04 +08:00
Ruihang Xia
7a60bfec2a fix: empty result type on prom query endpoint (#1732)
* adjust return type

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add test case

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-06 15:40:54 +08:00
Niwaka
a103614fd2 feat: support /api/v1/series for Prometheus (#1620)
* feat: support /api/v1/series for Prometheus

* chore: error handling

* feat: update tests
2023-06-06 10:29:16 +08:00
Yingwen
1b4976b077 feat: Adds some metrics for write path and flush (#1726)
* feat: more metrics

* feat: Add preprocess elapsed

* chore(storage): rename metric

* test: fix tests
2023-06-05 21:35:44 +08:00
Lei, HUANG
166fb8871e chore: bump greptimedb version 0.4.0 (#1724) 2023-06-05 18:41:53 +08:00
Yingwen
466f258266 feat(servers): collect samples by metric (#1706) 2023-06-03 17:17:52 +08:00
Ruihang Xia
94228285a7 feat: convert values to vector directly (#1704)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-03 12:41:13 +08:00
JeremyHi
3d7185749d feat: insert with stream (#1703)
* feat: insert with stream

* chore: by CR
2023-06-03 03:58:00 +00:00
142 changed files with 2817 additions and 1114 deletions

View File

@@ -7,20 +7,29 @@ on:
- cron: '0 0 * * 1'
# Mannually trigger only builds binaries.
workflow_dispatch:
inputs:
dry_run:
description: 'Skip docker push and release steps'
type: boolean
default: true
skip_test:
description: 'Do not run tests during build'
type: boolean
default: false
name: Release
env:
RUST_TOOLCHAIN: nightly-2023-05-03
SCHEDULED_BUILD_VERSION_PREFIX: v0.3.0
SCHEDULED_BUILD_VERSION_PREFIX: v0.4.0
SCHEDULED_PERIOD: nightly
CARGO_PROFILE: nightly
# Controls whether to run tests, include unit-test, integration-test and sqlness.
DISABLE_RUN_TESTS: false
DISABLE_RUN_TESTS: ${{ inputs.skip_test || false }}
jobs:
build-macos:
@@ -30,22 +39,22 @@ jobs:
# The file format is greptime-<os>-<arch>
include:
- arch: aarch64-apple-darwin
os: macos-latest
os: self-hosted
file: greptime-darwin-arm64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: x86_64-apple-darwin
os: macos-latest
os: self-hosted
file: greptime-darwin-amd64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: aarch64-apple-darwin
os: macos-latest
os: self-hosted
file: greptime-darwin-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: x86_64-apple-darwin
os: macos-latest
os: self-hosted
file: greptime-darwin-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
@@ -281,7 +290,7 @@ jobs:
name: Build docker image
needs: [build-linux, build-macos]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -294,7 +303,7 @@ jobs:
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name == 'schedule'
if: github.event_name != 'push'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
@@ -302,7 +311,7 @@ jobs:
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
if: github.event_name != 'schedule'
if: github.event_name == 'push'
run: |
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
@@ -367,7 +376,7 @@ jobs:
# Release artifacts only when all the artifacts are built successfully.
needs: [build-linux, build-macos, docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -377,7 +386,7 @@ jobs:
- name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-${SCHEDULED_PERIOD}-YYYYMMDD, like v0.2.0-nigthly-20230313.
shell: bash
if: github.event_name == 'schedule'
if: github.event_name != 'push'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-${{ env.SCHEDULED_PERIOD }}-$buildTime
@@ -395,13 +404,13 @@ jobs:
fi
- name: Create scheduled build git tag
if: github.event_name == 'schedule'
if: github.event_name != 'push'
run: |
git tag ${{ env.SCHEDULED_BUILD_VERSION }}
- name: Publish scheduled release # configure the different release title and tags.
uses: ncipollo/release-action@v1
if: github.event_name == 'schedule'
if: github.event_name != 'push'
with:
name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
prerelease: ${{ env.prerelease }}
@@ -413,7 +422,7 @@ jobs:
- name: Publish release
uses: ncipollo/release-action@v1
if: github.event_name != 'schedule'
if: github.event_name == 'push'
with:
name: "${{ github.ref_name }}"
prerelease: ${{ env.prerelease }}
@@ -426,7 +435,7 @@ jobs:
name: Push docker image to alibaba cloud container registry
needs: [docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
continue-on-error: true
steps:
- name: Checkout sources
@@ -447,7 +456,7 @@ jobs:
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name == 'schedule'
if: github.event_name != 'push'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
@@ -455,7 +464,7 @@ jobs:
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
if: github.event_name != 'schedule'
if: github.event_name == 'push'
run: |
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV

Cargo.lock (generated), 341 changed lines
View File

@@ -64,9 +64,9 @@ dependencies = [
[[package]]
name = "aho-corasick"
version = "1.0.1"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04"
checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41"
dependencies = [
"memchr",
]
@@ -199,7 +199,7 @@ checksum = "8f1f8f5a6f3d50d89e3797d7593a50f96bb2aaa20ca0cc7be1fb673232c91d72"
[[package]]
name = "api"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"arrow-flight",
"common-base",
@@ -831,9 +831,9 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
[[package]]
name = "bcder"
version = "0.7.1"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69dfb7dc0d4aee3f8c723c43553b55662badf692b541ff8e4426df75dae8da9a"
checksum = "ab26f019795af36086f2ca879aeeaae7566bdfd2fe0821a0328d3fdd9d1da2d9"
dependencies = [
"bytes",
"smallvec",
@@ -841,10 +841,10 @@ dependencies = [
[[package]]
name = "benchmarks"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"arrow",
"clap 4.3.0",
"clap 4.3.2",
"client",
"indicatif",
"itertools",
@@ -1224,7 +1224,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "catalog"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"api",
"arc-swap",
@@ -1445,20 +1445,20 @@ dependencies = [
[[package]]
name = "clap"
version = "4.3.0"
version = "4.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93aae7a4192245f70fe75dd9157fc7b4a5bf53e88d30bd4396f7d8f9284d5acc"
checksum = "401a4694d2bf92537b6867d94de48c4842089645fdcdf6c71865b175d836e9c2"
dependencies = [
"clap_builder",
"clap_derive 4.3.0",
"clap_derive 4.3.2",
"once_cell",
]
[[package]]
name = "clap_builder"
version = "4.3.0"
version = "4.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4f423e341edefb78c9caba2d9c7f7687d0e72e89df3ce3394554754393ac3990"
checksum = "72394f3339a76daf211e57d4bcb374410f3965dcc606dd0e03738c7888766980"
dependencies = [
"anstream",
"anstyle",
@@ -1482,9 +1482,9 @@ dependencies = [
[[package]]
name = "clap_derive"
version = "4.3.0"
version = "4.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "191d9573962933b4027f932c600cd252ce27a8ad5979418fe78e43c07996f27b"
checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f"
dependencies = [
"heck",
"proc-macro2",
@@ -1509,7 +1509,7 @@ checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b"
[[package]]
name = "client"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"api",
"arrow-flight",
@@ -1534,9 +1534,10 @@ dependencies = [
"prost",
"rand",
"snafu",
"substrait 0.2.0",
"substrait 0.4.0",
"substrait 0.7.5",
"tokio",
"tokio-stream",
"tonic 0.9.2",
"tracing",
"tracing-subscriber",
@@ -1570,7 +1571,7 @@ dependencies = [
[[package]]
name = "cmd"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"anymap",
"build-data",
@@ -1600,9 +1601,8 @@ dependencies = [
"servers",
"session",
"snafu",
"substrait 0.2.0",
"substrait 0.4.0",
"temp-env",
"tikv-jemalloc-ctl",
"tikv-jemallocator",
"tokio",
"toml",
@@ -1633,7 +1633,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]]
name = "common-base"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"anymap",
"bitvec",
@@ -1647,7 +1647,7 @@ dependencies = [
[[package]]
name = "common-catalog"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"async-trait",
"chrono",
@@ -1664,7 +1664,7 @@ dependencies = [
[[package]]
name = "common-datasource"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"arrow",
"arrow-schema",
@@ -1689,7 +1689,7 @@ dependencies = [
[[package]]
name = "common-error"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"snafu",
"strum",
@@ -1697,7 +1697,7 @@ dependencies = [
[[package]]
name = "common-function"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"arc-swap",
"chrono-tz 0.6.3",
@@ -1720,7 +1720,7 @@ dependencies = [
[[package]]
name = "common-function-macro"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"arc-swap",
"backtrace",
@@ -1736,7 +1736,7 @@ dependencies = [
[[package]]
name = "common-grpc"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"api",
"arrow-flight",
@@ -1766,7 +1766,7 @@ dependencies = [
[[package]]
name = "common-grpc-expr"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"api",
"async-trait",
@@ -1785,7 +1785,7 @@ dependencies = [
[[package]]
name = "common-mem-prof"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"common-error",
"snafu",
@@ -1798,7 +1798,7 @@ dependencies = [
[[package]]
name = "common-meta"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"api",
"chrono",
@@ -1816,9 +1816,20 @@ dependencies = [
"tokio",
]
[[package]]
name = "common-pprof"
version = "0.4.0"
dependencies = [
"common-error",
"pprof",
"prost",
"snafu",
"tokio",
]
[[package]]
name = "common-procedure"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"async-stream",
"async-trait",
@@ -1840,7 +1851,7 @@ dependencies = [
[[package]]
name = "common-procedure-test"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"async-trait",
"common-procedure",
@@ -1848,7 +1859,7 @@ dependencies = [
[[package]]
name = "common-query"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"api",
"async-trait",
@@ -1868,7 +1879,7 @@ dependencies = [
[[package]]
name = "common-recordbatch"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"common-error",
"datafusion",
@@ -1884,7 +1895,7 @@ dependencies = [
[[package]]
name = "common-runtime"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"async-trait",
"common-error",
@@ -1900,7 +1911,7 @@ dependencies = [
[[package]]
name = "common-telemetry"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"backtrace",
"common-error",
@@ -1925,7 +1936,7 @@ dependencies = [
[[package]]
name = "common-test-util"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"once_cell",
"rand",
@@ -1934,7 +1945,7 @@ dependencies = [
[[package]]
name = "common-time"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"chrono",
"chrono-tz 0.8.2",
@@ -2078,6 +2089,15 @@ version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa"
[[package]]
name = "cpp_demangle"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c76f98bdfc7f66172e6c7065f981ebb576ffc903fe4c0561d9f0c2509226dc6"
dependencies = [
"cfg-if 1.0.0",
]
[[package]]
name = "cpufeatures"
version = "0.2.7"
@@ -2379,7 +2399,7 @@ dependencies = [
"hashbrown 0.12.3",
"lock_api",
"once_cell",
"parking_lot_core 0.9.7",
"parking_lot_core 0.9.8",
]
[[package]]
@@ -2565,7 +2585,7 @@ dependencies = [
[[package]]
name = "datanode"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"api",
"async-compat",
@@ -2621,7 +2641,7 @@ dependencies = [
"sql",
"storage",
"store-api",
"substrait 0.2.0",
"substrait 0.4.0",
"table",
"table-procedure",
"tokio",
@@ -2635,7 +2655,7 @@ dependencies = [
[[package]]
name = "datatypes"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"arrow",
"arrow-array",
@@ -2655,6 +2675,15 @@ dependencies = [
"snafu",
]
[[package]]
name = "debugid"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d"
dependencies = [
"uuid",
]
[[package]]
name = "der"
version = "0.5.1"
@@ -3061,7 +3090,7 @@ dependencies = [
[[package]]
name = "file-table-engine"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"async-trait",
"common-catalog",
@@ -3099,6 +3128,18 @@ dependencies = [
"windows-sys 0.48.0",
]
[[package]]
name = "findshlibs"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64"
dependencies = [
"cc",
"lazy_static",
"libc",
"winapi",
]
[[package]]
name = "fixedbitset"
version = "0.4.2"
@@ -3113,9 +3154,9 @@ checksum = "cda653ca797810c02f7ca4b804b40b8b95ae046eb989d356bce17919a8c25499"
[[package]]
name = "flatbuffers"
version = "23.1.21"
version = "23.5.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77f5399c2c9c50ae9418e522842ad362f61ee48b346ac106807bd355a8a7c619"
checksum = "4dac53e22462d78c16d64a1cd22371b54cc3fe94aa15e7886a2fa6e5d1ab8640"
dependencies = [
"bitflags 1.3.2",
"rustc_version 0.4.0",
@@ -3140,9 +3181,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "form_urlencoded"
version = "1.1.0"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8"
checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652"
dependencies = [
"percent-encoding",
]
@@ -3158,7 +3199,7 @@ dependencies = [
[[package]]
name = "frontend"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"api",
"async-compat",
@@ -3212,7 +3253,7 @@ dependencies = [
"storage",
"store-api",
"strfmt",
"substrait 0.2.0",
"substrait 0.4.0",
"table",
"tokio",
"toml",
@@ -4347,9 +4388,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "idna"
version = "0.3.0"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6"
checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c"
dependencies = [
"unicode-bidi",
"unicode-normalization",
@@ -4384,9 +4425,9 @@ dependencies = [
[[package]]
name = "indicatif"
version = "0.17.4"
version = "0.17.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db45317f37ef454e6519b6c3ed7d377e5f23346f0823f86e65ca36912d1d0ef8"
checksum = "8ff8cc23a7393a397ed1d7f56e6365cba772aba9f9912ab968b03043c395d057"
dependencies = [
"console",
"instant",
@@ -4401,6 +4442,24 @@ version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa799dd5ed20a7e349f3b4639aa80d74549c81716d9ec4f994c9b5815598306"
[[package]]
name = "inferno"
version = "0.11.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fb7c1b80a1dfa604bb4a649a5c5aeef3d913f7c520cb42b40e534e8a61bcdfc"
dependencies = [
"ahash 0.8.3",
"indexmap",
"is-terminal",
"itoa",
"log",
"num-format",
"once_cell",
"quick-xml 0.26.0",
"rgb",
"str_stack",
]
[[package]]
name = "influxdb_line_protocol"
version = "0.1.0"
@@ -4698,9 +4757,9 @@ dependencies = [
[[package]]
name = "libc"
version = "0.2.144"
version = "0.2.145"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1"
checksum = "fc86cde3ff845662b8f4ef6cb50ea0e20c524eb3d29ae048287e06a1b3fa6a81"
[[package]]
name = "libgit2-sys"
@@ -4784,9 +4843,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
[[package]]
name = "lock_api"
version = "0.4.9"
version = "0.4.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16"
dependencies = [
"autocfg",
"scopeguard",
@@ -4800,7 +4859,7 @@ checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de"
[[package]]
name = "log-store"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"arc-swap",
"async-stream",
@@ -5062,7 +5121,7 @@ dependencies = [
[[package]]
name = "meta-client"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"api",
"async-trait",
@@ -5090,7 +5149,7 @@ dependencies = [
[[package]]
name = "meta-srv"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"anymap",
"api",
@@ -5282,7 +5341,7 @@ dependencies = [
[[package]]
name = "mito"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"anymap",
"arc-swap",
@@ -5306,6 +5365,7 @@ dependencies = [
"futures",
"key-lock",
"log-store",
"metrics",
"object-store",
"serde",
"serde_json",
@@ -5642,6 +5702,16 @@ dependencies = [
"syn 1.0.109",
]
[[package]]
name = "num-format"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3"
dependencies = [
"arrayvec",
"itoa",
]
[[package]]
name = "num-integer"
version = "0.1.45"
@@ -5733,16 +5803,16 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"
[[package]]
name = "object"
version = "0.30.3"
version = "0.30.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439"
checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385"
dependencies = [
"memchr",
]
[[package]]
name = "object-store"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"anyhow",
"async-trait",
@@ -5781,9 +5851,9 @@ dependencies = [
[[package]]
name = "once_cell"
version = "1.17.2"
version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9670a07f94779e00908f3e686eab508878ebb390ba6e604d3a284c00e8d0487b"
checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
[[package]]
name = "oorandom"
@@ -6024,7 +6094,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
dependencies = [
"lock_api",
"parking_lot_core 0.9.7",
"parking_lot_core 0.9.8",
]
[[package]]
@@ -6043,18 +6113,18 @@ dependencies = [
[[package]]
name = "parking_lot_core"
version = "0.9.7"
version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521"
checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447"
dependencies = [
"backtrace",
"cfg-if 1.0.0",
"libc",
"petgraph",
"redox_syscall 0.2.16",
"redox_syscall 0.3.5",
"smallvec",
"thread-id",
"windows-sys 0.45.0",
"windows-targets 0.48.0",
]
[[package]]
@@ -6115,7 +6185,7 @@ dependencies = [
[[package]]
name = "partition"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"api",
"async-trait",
@@ -6194,9 +6264,9 @@ dependencies = [
[[package]]
name = "percent-encoding"
version = "2.2.0"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"
[[package]]
name = "pest"
@@ -6518,6 +6588,32 @@ dependencies = [
"postgres-protocol",
]
[[package]]
name = "pprof"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "196ded5d4be535690899a4631cc9f18cdc41b7ebf24a79400f46f48e49a11059"
dependencies = [
"backtrace",
"cfg-if 1.0.0",
"findshlibs",
"inferno",
"libc",
"log",
"nix 0.26.2",
"once_cell",
"parking_lot 0.12.1",
"prost",
"prost-build",
"prost-derive",
"protobuf",
"sha2",
"smallvec",
"symbolic-demangle",
"tempfile",
"thiserror",
]
[[package]]
name = "ppv-lite86"
version = "0.2.17"
@@ -6676,7 +6772,7 @@ dependencies = [
[[package]]
name = "promql"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"async-recursion",
"async-trait",
@@ -6926,7 +7022,7 @@ dependencies = [
[[package]]
name = "query"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"ahash 0.8.3",
"approx_eq",
@@ -6980,12 +7076,21 @@ dependencies = [
"stats-cli",
"store-api",
"streaming-stats",
"substrait 0.2.0",
"substrait 0.4.0",
"table",
"tokio",
"tokio-stream",
]
[[package]]
name = "quick-xml"
version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f50b1c63b38611e7d4d7f68b82d3ad0cc71a2ad2e7f61fc10f1328d917c93cd"
dependencies = [
"memchr",
]
[[package]]
name = "quick-xml"
version = "0.27.1"
@@ -7174,11 +7279,11 @@ dependencies = [
[[package]]
name = "regex"
version = "1.8.3"
version = "1.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81ca098a9821bd52d6b24fd8b10bd081f47d39c22778cafaa75a2857a62c6390"
checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f"
dependencies = [
"aho-corasick 1.0.1",
"aho-corasick 1.0.2",
"memchr",
"regex-syntax 0.7.2",
]
@@ -7341,6 +7446,15 @@ dependencies = [
"thiserror",
]
[[package]]
name = "rgb"
version = "0.8.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20ec2d3e3fc7a92ced357df9cebd5a10b6fb2aa1ee797bf7e9ce2f17dffc8f59"
dependencies = [
"bytemuck",
]
[[package]]
name = "ring"
version = "0.16.20"
@@ -8138,7 +8252,7 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "script"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"arrow",
"async-trait",
@@ -8393,7 +8507,7 @@ dependencies = [
[[package]]
name = "servers"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"aide",
"api",
@@ -8413,6 +8527,7 @@ dependencies = [
"common-grpc",
"common-grpc-expr",
"common-mem-prof",
"common-pprof",
"common-query",
"common-recordbatch",
"common-runtime",
@@ -8461,6 +8576,7 @@ dependencies = [
"sql",
"strum",
"table",
"tikv-jemalloc-ctl",
"tokio",
"tokio-postgres",
"tokio-postgres-rustls",
@@ -8475,7 +8591,7 @@ dependencies = [
[[package]]
name = "session"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"arc-swap",
"common-catalog",
@@ -8750,7 +8866,7 @@ dependencies = [
[[package]]
name = "sql"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"api",
"common-base",
@@ -8796,7 +8912,7 @@ dependencies = [
[[package]]
name = "sqlness-runner"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"async-trait",
"client",
@@ -8940,6 +9056,12 @@ dependencies = [
"optional",
]
[[package]]
name = "stable_deref_trait"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
[[package]]
name = "static_assertions"
version = "1.1.0"
@@ -8971,7 +9093,7 @@ dependencies = [
[[package]]
name = "storage"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"arc-swap",
"arrow",
@@ -9022,7 +9144,7 @@ dependencies = [
[[package]]
name = "store-api"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"async-stream",
"async-trait",
@@ -9047,6 +9169,12 @@ version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e08d8363704e6c71fc928674353e6b7c23dcea9d82d7012c8faf2a3a025f8d0"
[[package]]
name = "str_stack"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9091b6114800a5f2141aee1d1b9d6ca3592ac062dc5decb3764ec5895a47b4eb"
[[package]]
name = "streaming-stats"
version = "0.2.3"
@@ -9131,7 +9259,7 @@ dependencies = [
[[package]]
name = "substrait"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"async-recursion",
"async-trait",
@@ -9203,6 +9331,29 @@ version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
[[package]]
name = "symbolic-common"
version = "10.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b55cdc318ede251d0957f07afe5fed912119b8c1bc5a7804151826db999e737"
dependencies = [
"debugid",
"memmap2",
"stable_deref_trait",
"uuid",
]
[[package]]
name = "symbolic-demangle"
version = "10.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "79be897be8a483a81fff6a3a4e195b4ac838ef73ca42d348b3f722da9902e489"
dependencies = [
"cpp_demangle",
"rustc-demangle",
"symbolic-common",
]
[[package]]
name = "syn"
version = "1.0.109"
@@ -9263,7 +9414,7 @@ dependencies = [
[[package]]
name = "table"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"anymap",
"async-trait",
@@ -9299,7 +9450,7 @@ dependencies = [
[[package]]
name = "table-procedure"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"async-trait",
"catalog",
@@ -9392,7 +9543,7 @@ dependencies = [
[[package]]
name = "tests-integration"
version = "0.2.0"
version = "0.4.0"
dependencies = [
"api",
"async-trait",
@@ -10492,9 +10643,9 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a"
[[package]]
name = "url"
version = "2.3.1"
version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643"
checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb"
dependencies = [
"form_urlencoded",
"idna",
@@ -11071,9 +11222,9 @@ dependencies = [
[[package]]
name = "xml-rs"
version = "0.8.13"
version = "0.8.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d8f380ae16a37b30e6a2cf67040608071384b1450c189e61bea3ff57cde922d"
checksum = "52839dc911083a8ef63efa4d039d1f58b5e409f923e44c80828f206f66e5541c"
[[package]]
name = "xz2"

View File

@@ -17,6 +17,7 @@ members = [
"src/common/meta",
"src/common/procedure",
"src/common/procedure-test",
"src/common/pprof",
"src/common/query",
"src/common/recordbatch",
"src/common/runtime",
@@ -49,7 +50,7 @@ members = [
]
[workspace.package]
version = "0.2.0"
version = "0.4.0"
edition = "2021"
license = "Apache-2.0"

View File

@@ -106,7 +106,7 @@ Please see [the online document site](https://docs.greptime.com/getting-started/
Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview#connect) on our [official document site](https://docs.greptime.com/).
To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients).
To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/client/overview).
## Resources
@@ -123,7 +123,7 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c
### Documentation
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts.html)
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts/overview)
- GreptimeDB [Developer
Guide](https://docs.greptime.com/developer-guide/overview.html)
- GreptimeDB [internal code document](https://greptimedb.rs)

View File

@@ -51,7 +51,7 @@ max_purge_tasks = 32
# Create a checkpoint every <checkpoint_margin> actions.
checkpoint_margin = 10
# Region manifest logs and checkpoints gc execution duration
gc_duration = '30s'
gc_duration = '10m'
# Whether to try creating a manifest checkpoint on region opening
checkpoint_on_startup = false

View File

@@ -115,7 +115,7 @@ max_purge_tasks = 32
# Create a checkpoint every <checkpoint_margin> actions.
checkpoint_margin = 10
# Region manifest logs and checkpoints gc execution duration
gc_duration = '30s'
gc_duration = '10m'
# Whether to try creating a manifest checkpoint on region opening
checkpoint_on_startup = false

View File

@@ -41,7 +41,7 @@ pub enum Error {
))]
ConvertColumnDefaultConstraint {
column: String,
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},
@@ -52,7 +52,7 @@ pub enum Error {
))]
InvalidColumnDefaultConstraint {
column: String,
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},
}
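
Commit 8c78368374 replaces the `#[snafu(backtrace)]` attribute on error sources with an explicit `location: Location` field, which is also why the match arms later in these diffs gain a trailing `..`. A compile-only sketch of the resulting shape (not taken from the GreptimeDB sources; std::io::Error stands in for the real source error types):

use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to read system catalog table, source: {}", source))]
    ReadSystemCatalog {
        // Replaces `#[snafu(backtrace)]` on the old variant; the field records
        // where the error was raised.
        location: Location,
        source: std::io::Error,
    },
}

fn status_code(err: &Error) -> &'static str {
    match err {
        // The trailing `..` is required because the variant now carries the
        // extra `location` field alongside `source`.
        Error::ReadSystemCatalog { source, .. } => {
            let _ = source;
            "Internal"
        }
    }
}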

View File

@@ -32,18 +32,18 @@ pub enum Error {
source
))]
CompileScriptInternal {
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to open system catalog table, source: {}", source))]
OpenSystemCatalog {
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to create system catalog table, source: {}", source))]
CreateSystemCatalog {
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
@@ -54,7 +54,7 @@ pub enum Error {
))]
CreateTable {
table_info: String,
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
@@ -94,7 +94,7 @@ pub enum Error {
#[snafu(display("Table engine not found: {}, source: {}", engine_name, source))]
TableEngineNotFound {
engine_name: String,
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
@@ -132,7 +132,7 @@ pub enum Error {
#[snafu(display("Failed to open table, table info: {}, source: {}", table_info, source))]
OpenTable {
table_info: String,
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
@@ -147,13 +147,13 @@ pub enum Error {
#[snafu(display("Failed to read system catalog table records"))]
ReadSystemCatalog {
#[snafu(backtrace)]
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to create recordbatch, source: {}", source))]
CreateRecordBatch {
#[snafu(backtrace)]
location: Location,
source: common_recordbatch::error::Error,
},
@@ -162,7 +162,7 @@ pub enum Error {
source
))]
InsertCatalogRecord {
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
@@ -173,7 +173,7 @@ pub enum Error {
))]
DeregisterTable {
request: DeregisterTableRequest,
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
@@ -182,36 +182,36 @@ pub enum Error {
#[snafu(display("Failed to scan system catalog table, source: {}", source))]
SystemCatalogTableScan {
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
#[snafu(display("{source}"))]
Internal {
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
SystemCatalogTableScanExec {
#[snafu(backtrace)]
location: Location,
source: common_query::error::Error,
},
#[snafu(display("Cannot parse catalog value, source: {}", source))]
InvalidCatalogValue {
#[snafu(backtrace)]
location: Location,
source: common_catalog::error::Error,
},
#[snafu(display("Failed to perform metasrv operation, source: {}", source))]
MetaSrv {
#[snafu(backtrace)]
location: Location,
source: meta_client::error::Error,
},
#[snafu(display("Invalid table info in catalog, source: {}", source))]
InvalidTableInfoInCatalog {
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},
@@ -230,7 +230,7 @@ pub enum Error {
#[snafu(display("Table schema mismatch, source: {}", source))]
TableSchemaMismatch {
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
@@ -258,7 +258,7 @@ impl ErrorExt for Error {
Error::Generic { .. } | Error::SystemCatalogTypeMismatch { .. } => StatusCode::Internal,
Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source } => {
Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source, .. } => {
source.status_code()
}
Error::InvalidCatalogValue { source, .. } => source.status_code(),
@@ -275,14 +275,14 @@ impl ErrorExt for Error {
| Error::OpenTable { source, .. }
| Error::CreateTable { source, .. }
| Error::DeregisterTable { source, .. }
| Error::TableSchemaMismatch { source } => source.status_code(),
| Error::TableSchemaMismatch { source, .. } => source.status_code(),
Error::MetaSrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source } => source.status_code(),
Error::SystemCatalogTableScanExec { source } => source.status_code(),
Error::InvalidTableInfoInCatalog { source } => source.status_code(),
Error::SystemCatalogTableScan { source, .. } => source.status_code(),
Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
Error::CompileScriptInternal { source } | Error::Internal { source } => {
Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
source.status_code()
}

View File

@@ -17,7 +17,7 @@ use std::fmt::Debug;
use std::pin::Pin;
use std::sync::Arc;
pub use client::CachedMetaKvBackend;
pub use client::{CachedMetaKvBackend, MetaKvBackend};
use futures::Stream;
use futures_util::StreamExt;
pub use manager::{RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider};

View File

@@ -30,12 +30,13 @@ parking_lot = "0.12"
prost.workspace = true
rand.workspace = true
snafu.workspace = true
tokio-stream = { version = "0.1", features = ["net"] }
tokio.workspace = true
tonic.workspace = true
[dev-dependencies]
datanode = { path = "../datanode" }
substrait = { path = "../common/substrait" }
tokio.workspace = true
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
prost.workspace = true

View File

@@ -31,7 +31,6 @@ pub struct DatanodeClients {
impl Default for DatanodeClients {
fn default() -> Self {
// TODO(LFC): Make this channel config configurable.
let config = ChannelConfig::new().timeout(Duration::from_secs(8));
Self {

View File

@@ -29,6 +29,9 @@ use common_telemetry::{logging, timer};
use futures_util::{TryFutureExt, TryStreamExt};
use prost::Message;
use snafu::{ensure, ResultExt};
use tokio::sync::mpsc::Sender;
use tokio::sync::{mpsc, OnceCell};
use tokio_stream::wrappers::ReceiverStream;
use crate::error::{
ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
@@ -47,6 +50,7 @@ pub struct Database {
dbname: String,
client: Client,
streaming_client: OnceCell<Sender<GreptimeRequest>>,
ctx: FlightContext,
}
@@ -58,6 +62,7 @@ impl Database {
schema: schema.into(),
dbname: "".to_string(),
client,
streaming_client: OnceCell::new(),
ctx: FlightContext::default(),
}
}
@@ -75,6 +80,7 @@ impl Database {
schema: "".to_string(),
dbname: dbname.into(),
client,
streaming_client: OnceCell::new(),
ctx: FlightContext::default(),
}
}
@@ -114,6 +120,22 @@ impl Database {
self.handle(Request::Inserts(requests)).await
}
pub async fn insert_to_stream(&self, requests: InsertRequests) -> Result<()> {
let streaming_client = self
.streaming_client
.get_or_try_init(|| self.client_stream())
.await?;
let request = self.to_rpc_request(Request::Inserts(requests));
streaming_client.send(request).await.map_err(|e| {
error::ClientStreamingSnafu {
err_msg: e.to_string(),
}
.build()
})
}
pub async fn delete(&self, request: DeleteRequest) -> Result<u32> {
let _timer = timer!(metrics::METRIC_GRPC_DELETE);
self.handle(Request::Delete(request)).await
@@ -121,15 +143,7 @@ impl Database {
async fn handle(&self, request: Request) -> Result<u32> {
let mut client = self.client.make_database_client()?.inner;
let request = GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
dbname: self.dbname.clone(),
}),
request: Some(request),
};
let request = self.to_rpc_request(request);
let response = client
.handle(request)
.await?
@@ -142,6 +156,27 @@ impl Database {
Ok(value)
}
#[inline]
fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
dbname: self.dbname.clone(),
}),
request: Some(request),
}
}
async fn client_stream(&self) -> Result<Sender<GreptimeRequest>> {
let mut client = self.client.make_database_client()?.inner;
let (sender, receiver) = mpsc::channel::<GreptimeRequest>(65536);
let receiver = ReceiverStream::new(receiver);
client.handle_requests(receiver).await?;
Ok(sender)
}
pub async fn sql(&self, sql: &str) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_SQL);
self.do_get(Request::Query(QueryRequest {
@@ -212,22 +247,13 @@ impl Database {
async fn do_get(&self, request: Request) -> Result<Output> {
// FIXME(paomian): should be added some labels for metrics
let _timer = timer!(metrics::METRIC_GRPC_DO_GET);
let request = GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
dbname: self.dbname.clone(),
}),
request: Some(request),
};
let request = self.to_rpc_request(request);
let request = Ticket {
ticket: request.encode_to_vec().into(),
};
let mut client = self.client.make_flight_client()?;
// TODO(LFC): Streaming get flight data.
let flight_data: Vec<FlightData> = client
.mut_inner()
.do_get(request)
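
The streaming insert support added in commit 3d7185749d lazily opens one long-lived request channel per Database handle and caches its sender. A rough standalone sketch of that lazy-initialization pattern (not the actual client code): String stands in for GreptimeRequest, and a background task that drains the stream stands in for the gRPC `handle_requests` call.

use tokio::sync::{mpsc, OnceCell};
use tokio_stream::{wrappers::ReceiverStream, StreamExt};

struct Database {
    // The real field holds a Sender<GreptimeRequest>.
    streaming_client: OnceCell<mpsc::Sender<String>>,
}

impl Database {
    async fn insert_to_stream(&self, request: String) -> Result<(), String> {
        // The first call opens the channel; later calls reuse the cached sender.
        let sender = self
            .streaming_client
            .get_or_try_init(|| async {
                let (tx, rx) = mpsc::channel::<String>(65536);
                let mut requests = ReceiverStream::new(rx);
                // Stand-in for handing the stream to the gRPC `handle_requests`
                // endpoint: drain it in a background task.
                tokio::spawn(async move {
                    while let Some(req) = requests.next().await {
                        println!("streaming request: {req}");
                    }
                });
                Ok::<_, String>(tx)
            })
            .await?;
        sender.send(request).await.map_err(|e| e.to_string())
    }
}

Because the sender is cached, repeated insert_to_stream calls share a single request stream instead of building a new unary request per insert, which appears to be the point of the change.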

View File

@@ -34,13 +34,13 @@ pub enum Error {
#[snafu(display("Failed to convert FlightData, source: {}", source))]
ConvertFlightData {
#[snafu(backtrace)]
location: Location,
source: common_grpc::Error,
},
#[snafu(display("Column datatype error, source: {}", source))]
ColumnDataType {
#[snafu(backtrace)]
location: Location,
source: api::error::Error,
},
@@ -57,7 +57,7 @@ pub enum Error {
))]
CreateChannel {
addr: String,
#[snafu(backtrace)]
location: Location,
source: common_grpc::error::Error,
},
@@ -67,6 +67,9 @@ pub enum Error {
#[snafu(display("Illegal Database response: {err_msg}"))]
IllegalDatabaseResponse { err_msg: String },
#[snafu(display("Failed to send request with streaming: {}", err_msg))]
ClientStreaming { err_msg: String, location: Location },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -77,11 +80,12 @@ impl ErrorExt for Error {
Error::IllegalFlightMessages { .. }
| Error::ColumnDataType { .. }
| Error::MissingField { .. }
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,
| Error::IllegalDatabaseResponse { .. }
| Error::ClientStreaming { .. } => StatusCode::Internal,
Error::Server { code, .. } => *code,
Error::FlightGet { source, .. } => source.status_code(),
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source, .. } => {
source.status_code()
}
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,

View File

@@ -10,7 +10,6 @@ name = "greptime"
path = "src/bin/greptime.rs"
[features]
mem-prof = ["tikv-jemallocator", "tikv-jemalloc-ctl"]
tokio-console = ["common-telemetry/tokio-console"]
[dependencies]
@@ -42,8 +41,7 @@ servers = { path = "../servers" }
session = { path = "../session" }
snafu.workspace = true
substrait = { path = "../common/substrait" }
tikv-jemalloc-ctl = { version = "0.5", optional = true }
tikv-jemallocator = { version = "0.5", optional = true }
tikv-jemallocator = "0.5"
tokio.workspace = true
[dev-dependencies]

View File

@@ -180,15 +180,19 @@ fn full_version() -> &'static str {
)
}
#[cfg(feature = "mem-prof")]
fn log_env_flags() {
info!("command line arguments");
for argument in std::env::args() {
info!("argument: {}", argument);
}
}
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
#[tokio::main]
async fn main() -> Result<()> {
let cmd = Command::parse();
// TODO(dennis):
// 1. adds ip/port to app
let app_name = &cmd.subcmd.to_string();
let opts = cmd.load_options()?;
@@ -205,6 +209,14 @@ async fn main() -> Result<()> {
// Report app version as gauge.
gauge!("app_version", 1.0, "short_version" => short_version(), "version" => full_version());
// Log version and argument flags.
info!(
"short_version: {}, full_version: {}",
short_version(),
full_version()
);
log_env_flags();
let mut app = cmd.build(opts).await?;
tokio::select! {

View File

@@ -84,8 +84,6 @@ struct StartCommand {
rpc_addr: Option<String>,
#[clap(long)]
rpc_hostname: Option<String>,
#[clap(long)]
mysql_addr: Option<String>,
#[clap(long, multiple = true, value_delimiter = ',')]
metasrv_addr: Option<Vec<String>>,
#[clap(short, long)]
@@ -126,10 +124,6 @@ impl StartCommand {
opts.rpc_hostname = self.rpc_hostname.clone();
}
if let Some(addr) = &self.mysql_addr {
opts.mysql_addr = addr.clone();
}
if let Some(node_id) = self.node_id {
opts.node_id = Some(node_id);
}
@@ -205,8 +199,6 @@ mod tests {
rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8
mysql_addr = "127.0.0.1:4406"
mysql_runtime_size = 2
[meta_client_options]
metasrv_addrs = ["127.0.0.1:3002"]
@@ -252,8 +244,6 @@ mod tests {
cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
assert_eq!(2, options.mysql_runtime_size);
assert_eq!(Some(42), options.node_id);
assert_eq!("/other/wal", options.wal.dir.unwrap());
@@ -368,8 +358,6 @@ mod tests {
rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8
mysql_addr = "127.0.0.1:4406"
mysql_runtime_size = 2
[meta_client_options]
timeout_millis = 3000

View File

@@ -24,43 +24,43 @@ use snafu::Location;
pub enum Error {
#[snafu(display("Failed to start datanode, source: {}", source))]
StartDatanode {
#[snafu(backtrace)]
location: Location,
source: datanode::error::Error,
},
#[snafu(display("Failed to shutdown datanode, source: {}", source))]
ShutdownDatanode {
#[snafu(backtrace)]
location: Location,
source: datanode::error::Error,
},
#[snafu(display("Failed to start frontend, source: {}", source))]
StartFrontend {
#[snafu(backtrace)]
location: Location,
source: frontend::error::Error,
},
#[snafu(display("Failed to shutdown frontend, source: {}", source))]
ShutdownFrontend {
#[snafu(backtrace)]
location: Location,
source: frontend::error::Error,
},
#[snafu(display("Failed to build meta server, source: {}", source))]
BuildMetaServer {
#[snafu(backtrace)]
location: Location,
source: meta_srv::error::Error,
},
#[snafu(display("Failed to start meta server, source: {}", source))]
StartMetaServer {
#[snafu(backtrace)]
location: Location,
source: meta_srv::error::Error,
},
#[snafu(display("Failed to shutdown meta server, source: {}", source))]
ShutdownMetaServer {
#[snafu(backtrace)]
location: Location,
source: meta_srv::error::Error,
},
@@ -72,14 +72,14 @@ pub enum Error {
#[snafu(display("Illegal auth config: {}", source))]
IllegalAuthConfig {
#[snafu(backtrace)]
location: Location,
source: servers::auth::Error,
},
#[snafu(display("Unsupported selector type, {} source: {}", selector_type, source))]
UnsupportedSelectorType {
selector_type: String,
#[snafu(backtrace)]
location: Location,
source: meta_srv::error::Error,
},
@@ -101,44 +101,44 @@ pub enum Error {
#[snafu(display("Failed to request database, sql: {sql}, source: {source}"))]
RequestDatabase {
sql: String,
#[snafu(backtrace)]
location: Location,
source: client::Error,
},
#[snafu(display("Failed to collect RecordBatches, source: {source}"))]
CollectRecordBatches {
#[snafu(backtrace)]
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to pretty print Recordbatches, source: {source}"))]
PrettyPrintRecordBatches {
#[snafu(backtrace)]
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to start Meta client, source: {}", source))]
StartMetaClient {
#[snafu(backtrace)]
location: Location,
source: meta_client::error::Error,
},
#[snafu(display("Failed to parse SQL: {}, source: {}", sql, source))]
ParseSql {
sql: String,
#[snafu(backtrace)]
location: Location,
source: query::error::Error,
},
#[snafu(display("Failed to plan statement, source: {}", source))]
PlanStatement {
#[snafu(backtrace)]
location: Location,
source: query::error::Error,
},
#[snafu(display("Failed to encode logical plan in substrait, source: {}", source))]
SubstraitEncodeLogicalPlan {
#[snafu(backtrace)]
location: Location,
source: substrait::error::Error,
},
@@ -150,7 +150,7 @@ pub enum Error {
#[snafu(display("Failed to start catalog manager, source: {}", source))]
StartCatalogManager {
#[snafu(backtrace)]
location: Location,
source: catalog::error::Error,
},
}
@@ -160,13 +160,13 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::StartDatanode { source } => source.status_code(),
Error::StartFrontend { source } => source.status_code(),
Error::ShutdownDatanode { source } => source.status_code(),
Error::ShutdownFrontend { source } => source.status_code(),
Error::StartMetaServer { source } => source.status_code(),
Error::ShutdownMetaServer { source } => source.status_code(),
Error::BuildMetaServer { source } => source.status_code(),
Error::StartDatanode { source, .. } => source.status_code(),
Error::StartFrontend { source, .. } => source.status_code(),
Error::ShutdownDatanode { source, .. } => source.status_code(),
Error::ShutdownFrontend { source, .. } => source.status_code(),
Error::StartMetaServer { source, .. } => source.status_code(),
Error::ShutdownMetaServer { source, .. } => source.status_code(),
Error::BuildMetaServer { source, .. } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::MissingConfig { .. }
| Error::LoadLayeredConfig { .. }
@@ -175,15 +175,14 @@ impl ErrorExt for Error {
| Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
Error::RequestDatabase { source, .. } => source.status_code(),
Error::CollectRecordBatches { source } | Error::PrettyPrintRecordBatches { source } => {
Error::CollectRecordBatches { source, .. }
| Error::PrettyPrintRecordBatches { source, .. } => source.status_code(),
Error::StartMetaClient { source, .. } => source.status_code(),
Error::ParseSql { source, .. } | Error::PlanStatement { source, .. } => {
source.status_code()
}
Error::StartMetaClient { source } => source.status_code(),
Error::ParseSql { source, .. } | Error::PlanStatement { source } => {
source.status_code()
}
Error::SubstraitEncodeLogicalPlan { source } => source.status_code(),
Error::StartCatalogManager { source } => source.status_code(),
Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
Error::StartCatalogManager { source, .. } => source.status_code(),
}
}

View File

@@ -16,13 +16,16 @@ use std::fmt;
use std::str::FromStr;
use std::sync::Arc;
use common_query::error::{self, Result, UnsupportedInputDataTypeSnafu};
use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
use common_query::prelude::{Signature, Volatility};
use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
use datatypes::prelude::ConcreteDataType;
use datatypes::types::StringType;
use datatypes::vectors::{Int64Vector, StringVector, Vector, VectorRef};
use datatypes::types::TimestampType;
use datatypes::vectors::{
Int64Vector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
TimestampNanosecondVector, TimestampSecondVector, Vector, VectorRef,
};
use snafu::ensure;
use crate::scalars::function::{Function, FunctionContext};
@@ -42,18 +45,33 @@ fn convert_to_seconds(arg: &str) -> Option<i64> {
}
}
fn process_vector(vector: &dyn Vector) -> Vec<Option<i64>> {
(0..vector.len())
.map(|i| paste::expr!((vector.get(i)).as_timestamp().map(|ts| ts.value())))
.collect::<Vec<Option<i64>>>()
}
impl Function for ToUnixtimeFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::timestamp_second_datatype())
Ok(ConcreteDataType::int64_datatype())
}
fn signature(&self) -> Signature {
Signature::exact(
vec![ConcreteDataType::String(StringType)],
Signature::uniform(
1,
vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
],
Volatility::Immutable,
)
}
@@ -61,7 +79,7 @@ impl Function for ToUnixtimeFunction {
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 1,
error::InvalidFuncArgsSnafu {
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly one, have: {}",
columns.len()
@@ -79,6 +97,42 @@ impl Function for ToUnixtimeFunction {
.collect::<Vec<_>>(),
)))
}
ConcreteDataType::Int64(_) | ConcreteDataType::Int32(_) => {
let array = columns[0].to_arrow_array();
Ok(Arc::new(Int64Vector::try_from_arrow_array(&array).unwrap()))
}
ConcreteDataType::Timestamp(ts) => {
let array = columns[0].to_arrow_array();
let value = match ts {
TimestampType::Second(_) => {
let vector = paste::expr!(TimestampSecondVector::try_from_arrow_array(
array
)
.unwrap());
process_vector(&vector)
}
TimestampType::Millisecond(_) => {
let vector = paste::expr!(
TimestampMillisecondVector::try_from_arrow_array(array).unwrap()
);
process_vector(&vector)
}
TimestampType::Microsecond(_) => {
let vector = paste::expr!(
TimestampMicrosecondVector::try_from_arrow_array(array).unwrap()
);
process_vector(&vector)
}
TimestampType::Nanosecond(_) => {
let vector = paste::expr!(TimestampNanosecondVector::try_from_arrow_array(
array
)
.unwrap());
process_vector(&vector)
}
};
Ok(Arc::new(Int64Vector::from(value)))
}
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
@@ -97,28 +151,37 @@ impl fmt::Display for ToUnixtimeFunction {
#[cfg(test)]
mod tests {
use common_query::prelude::TypeSignature;
use datatypes::prelude::ConcreteDataType;
use datatypes::types::StringType;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder};
use datatypes::scalars::ScalarVector;
use datatypes::timestamp::TimestampSecond;
use datatypes::value::Value;
use datatypes::vectors::StringVector;
use datatypes::vectors::{StringVector, TimestampSecondVector};
use super::{ToUnixtimeFunction, *};
use crate::scalars::Function;
#[test]
fn test_to_unixtime() {
fn test_string_to_unixtime() {
let f = ToUnixtimeFunction::default();
assert_eq!("to_unixtime", f.name());
assert_eq!(
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::int64_datatype(),
f.return_type(&[]).unwrap()
);
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::Exact(valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![ConcreteDataType::String(StringType)]
Signature {
type_signature: TypeSignature::Uniform(1, valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
]
));
let times = vec![
@@ -145,4 +208,106 @@ mod tests {
}
}
}
#[test]
fn test_int_to_unixtime() {
let f = ToUnixtimeFunction::default();
assert_eq!("to_unixtime", f.name());
assert_eq!(
ConcreteDataType::int64_datatype(),
f.return_type(&[]).unwrap()
);
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::Uniform(1, valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
]
));
let times = vec![Some(3_i64), None, Some(5_i64), None];
let results = vec![Some(3), None, Some(5), None];
let args: Vec<VectorRef> = vec![Arc::new(Int64Vector::from(times.clone()))];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in times.iter().enumerate() {
let v = vector.get(i);
if i == 1 || i == 3 {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Int64(ts) => {
assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
}
_ => unreachable!(),
}
}
}
#[test]
fn test_timestamp_to_unixtime() {
let f = ToUnixtimeFunction::default();
assert_eq!("to_unixtime", f.name());
assert_eq!(
ConcreteDataType::int64_datatype(),
f.return_type(&[]).unwrap()
);
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::Uniform(1, valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
]
));
let times: Vec<Option<TimestampSecond>> = vec![
Some(TimestampSecond::new(123)),
None,
Some(TimestampSecond::new(42)),
None,
];
let results = vec![Some(123), None, Some(42), None];
let ts_vector: TimestampSecondVector = build_vector_from_slice(&times);
let args: Vec<VectorRef> = vec![Arc::new(ts_vector)];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in times.iter().enumerate() {
let v = vector.get(i);
if i == 1 || i == 3 {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Int64(ts) => {
assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
}
_ => unreachable!(),
}
}
}
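/// Builds a `ScalarVector` of concrete type `T` from optional items; used above
/// to construct the `TimestampSecondVector` test input.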
fn build_vector_from_slice<T: ScalarVector>(items: &[Option<T::RefItem<'_>>]) -> T {
let mut builder = T::Builder::with_capacity(items.len());
for item in items {
builder.push(*item);
}
builder.finish()
}
}

View File

@@ -16,7 +16,6 @@ use std::collections::HashMap;
use api::helper::ColumnDataTypeWrapper;
use api::v1::{Column, DeleteRequest as GrpcDeleteRequest};
use datatypes::data_type::DataType;
use datatypes::prelude::ConcreteDataType;
use snafu::{ensure, ResultExt};
use table::requests::DeleteRequest;
@@ -41,14 +40,11 @@ pub fn to_table_delete_request(request: GrpcDeleteRequest) -> Result<DeleteReque
let datatype: ConcreteDataType = ColumnDataTypeWrapper::try_new(datatype)
.context(ColumnDataTypeSnafu)?
.into();
let vector_builder = &mut datatype.create_mutable_vector(row_count);
add_values_to_builder(vector_builder, values, row_count, null_mask)?;
let vector = add_values_to_builder(datatype, values, row_count, null_mask)?;
ensure!(
key_column_values
.insert(column_name.clone(), vector_builder.to_vector())
.insert(column_name.clone(), vector)
.is_none(),
IllegalDeleteRequestSnafu {
reason: format!("Duplicated column '{column_name}' in delete request.")

View File

@@ -32,7 +32,7 @@ pub enum Error {
#[snafu(display("Column datatype error, source: {}", source))]
ColumnDataType {
#[snafu(backtrace)]
location: Location,
source: api::error::Error,
},
@@ -54,7 +54,7 @@ pub enum Error {
InvalidColumnProto { err_msg: String, location: Location },
#[snafu(display("Failed to create vector, source: {}", source))]
CreateVector {
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},
@@ -68,13 +68,13 @@ pub enum Error {
))]
InvalidColumnDef {
column: String,
#[snafu(backtrace)]
location: Location,
source: api::error::Error,
},
#[snafu(display("Unrecognized table option: {}", source))]
UnrecognizedTableOption {
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},

View File

@@ -13,6 +13,7 @@
// limitations under the License.
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
use api::v1::column::{SemanticType, Values};
@@ -25,10 +26,16 @@ use common_time::timestamp::Timestamp;
use common_time::{Date, DateTime};
use datatypes::data_type::{ConcreteDataType, DataType};
use datatypes::prelude::{ValueRef, VectorRef};
use datatypes::scalars::ScalarVector;
use datatypes::schema::SchemaRef;
use datatypes::types::TimestampType;
use datatypes::types::{Int16Type, Int8Type, TimestampType, UInt16Type, UInt8Type};
use datatypes::value::Value;
use datatypes::vectors::MutableVector;
use datatypes::vectors::{
BinaryVector, BooleanVector, DateTimeVector, DateVector, Float32Vector, Float64Vector,
Int32Vector, Int64Vector, PrimitiveVector, StringVector, TimestampMicrosecondVector,
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
UInt64Vector,
};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableId;
use table::requests::InsertRequest;
@@ -287,15 +294,10 @@ pub fn to_table_insert_request(
let datatype: ConcreteDataType = ColumnDataTypeWrapper::try_new(datatype)
.context(ColumnDataTypeSnafu)?
.into();
let vector_builder = &mut datatype.create_mutable_vector(row_count);
add_values_to_builder(vector_builder, values, row_count, null_mask)?;
let vector = add_values_to_builder(datatype, values, row_count, null_mask)?;
ensure!(
columns_values
.insert(column_name.clone(), vector_builder.to_vector())
.is_none(),
columns_values.insert(column_name.clone(), vector).is_none(),
ColumnAlreadyExistsSnafu {
column: column_name
}
@@ -312,28 +314,16 @@ pub fn to_table_insert_request(
}
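/// Converts the raw request `Values` into a finished vector: when no null mask
/// is given the values map directly onto a typed vector, otherwise they are
/// pushed through a mutable vector builder with nulls interleaved.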
pub(crate) fn add_values_to_builder(
builder: &mut Box<dyn MutableVector>,
data_type: ConcreteDataType,
values: Values,
row_count: usize,
null_mask: Vec<u8>,
) -> Result<()> {
let data_type = builder.data_type();
let values = convert_values(&data_type, values);
) -> Result<VectorRef> {
if null_mask.is_empty() {
ensure!(
values.len() == row_count,
UnexpectedValuesLengthSnafu {
reason: "If null_mask is empty, the length of values must be equal to row_count."
}
);
values.iter().try_for_each(|value| {
builder
.try_push_value_ref(value.as_value_ref())
.context(CreateVectorSnafu)
})?;
Ok(values_to_vector(&data_type, values))
} else {
let builder = &mut data_type.create_mutable_vector(row_count);
let values = convert_values(&data_type, values);
let null_mask = BitVec::from_vec(null_mask);
ensure!(
null_mask.count_ones() + values.len() == row_count,
@@ -354,8 +344,53 @@ pub(crate) fn add_values_to_builder(
}
}
}
Ok(builder.to_vector())
}
}
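/// Builds a typed `VectorRef` straight from the request `Values` for the
/// null-mask-free fast path, avoiding per-value pushes into a mutable builder.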
fn values_to_vector(data_type: &ConcreteDataType, values: Values) -> VectorRef {
match data_type {
ConcreteDataType::Boolean(_) => Arc::new(BooleanVector::from(values.bool_values)),
ConcreteDataType::Int8(_) => Arc::new(PrimitiveVector::<Int8Type>::from_iter_values(
values.i8_values.into_iter().map(|x| x as i8),
)),
ConcreteDataType::Int16(_) => Arc::new(PrimitiveVector::<Int16Type>::from_iter_values(
values.i16_values.into_iter().map(|x| x as i16),
)),
ConcreteDataType::Int32(_) => Arc::new(Int32Vector::from_vec(values.i32_values)),
ConcreteDataType::Int64(_) => Arc::new(Int64Vector::from_vec(values.i64_values)),
ConcreteDataType::UInt8(_) => Arc::new(PrimitiveVector::<UInt8Type>::from_iter_values(
values.u8_values.into_iter().map(|x| x as u8),
)),
ConcreteDataType::UInt16(_) => Arc::new(PrimitiveVector::<UInt16Type>::from_iter_values(
values.u16_values.into_iter().map(|x| x as u16),
)),
ConcreteDataType::UInt32(_) => Arc::new(UInt32Vector::from_vec(values.u32_values)),
ConcreteDataType::UInt64(_) => Arc::new(UInt64Vector::from_vec(values.u64_values)),
ConcreteDataType::Float32(_) => Arc::new(Float32Vector::from_vec(values.f32_values)),
ConcreteDataType::Float64(_) => Arc::new(Float64Vector::from_vec(values.f64_values)),
ConcreteDataType::Binary(_) => Arc::new(BinaryVector::from(values.binary_values)),
ConcreteDataType::String(_) => Arc::new(StringVector::from_vec(values.string_values)),
ConcreteDataType::Date(_) => Arc::new(DateVector::from_vec(values.date_values)),
ConcreteDataType::DateTime(_) => Arc::new(DateTimeVector::from_vec(values.datetime_values)),
ConcreteDataType::Timestamp(unit) => match unit {
TimestampType::Second(_) => {
Arc::new(TimestampSecondVector::from_vec(values.ts_second_values))
}
TimestampType::Millisecond(_) => Arc::new(TimestampMillisecondVector::from_vec(
values.ts_millisecond_values,
)),
TimestampType::Microsecond(_) => Arc::new(TimestampMicrosecondVector::from_vec(
values.ts_microsecond_values,
)),
TimestampType::Nanosecond(_) => Arc::new(TimestampNanosecondVector::from_vec(
values.ts_nanosecond_values,
)),
},
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
unreachable!()
}
}
Ok(())
}
fn convert_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> {

View File

@@ -53,7 +53,7 @@ pub enum Error {
#[snafu(display("Failed to create RecordBatch, source: {}", source))]
CreateRecordBatch {
#[snafu(backtrace)]
location: Location,
source: common_recordbatch::error::Error,
},
@@ -71,7 +71,7 @@ pub enum Error {
#[snafu(display("Failed to convert Arrow Schema, source: {}", source))]
ConvertArrowSchema {
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},
}
@@ -88,8 +88,8 @@ impl ErrorExt for Error {
| Error::Conversion { .. }
| Error::DecodeFlightData { .. } => StatusCode::Internal,
Error::CreateRecordBatch { source } => source.status_code(),
Error::ConvertArrowSchema { source } => source.status_code(),
Error::CreateRecordBatch { source, .. } => source.status_code(),
Error::ConvertArrowSchema { source, .. } => source.status_code(),
}
}

View File

@@ -23,7 +23,7 @@ pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Failed to read OPT_PROF"))]
#[snafu(display("Failed to read OPT_PROF, source: {}", source))]
ReadOptProf { source: tikv_jemalloc_ctl::Error },
#[snafu(display("Memory profiling is not enabled"))]
@@ -32,13 +32,17 @@ pub enum Error {
#[snafu(display("Failed to build temp file from given path: {:?}", path))]
BuildTempPath { path: PathBuf, location: Location },
#[snafu(display("Failed to open temp file: {}", path))]
#[snafu(display("Failed to open temp file: {}, source: {}", path, source))]
OpenTempFile {
path: String,
source: std::io::Error,
},
#[snafu(display("Failed to dump profiling data to temp file: {:?}", path))]
#[snafu(display(
"Failed to dump profiling data to temp file: {:?}, source: {}",
path,
source
))]
DumpProfileData {
path: PathBuf,
source: tikv_jemalloc_ctl::Error,

View File

@@ -0,0 +1,16 @@
[package]
name = "common-pprof"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
common-error = { path = "../error" }
pprof = { version = "0.11", features = [
"flamegraph",
"prost-codec",
"protobuf",
] }
prost.workspace = true
snafu.workspace = true
tokio.workspace = true

View File

@@ -0,0 +1,28 @@
# Profiling CPU
## Build GreptimeDB with `pprof` feature
```bash
cargo build --features=pprof
```
## HTTP API
Sample at 99 Hertz, for 5 seconds, output report in [protobuf format](https://github.com/google/pprof/blob/master/proto/profile.proto).
```bash
curl -s '0:4000/v1/prof/cpu' > /tmp/pprof.out
```
Then you can use the `pprof` command with the protobuf file.
```bash
go tool pprof -top /tmp/pprof.out
```
Sample at 99 Hertz, for 60 seconds, output report in flamegraph format.
```bash
curl -s '0:4000/v1/prof/cpu?seconds=60&output=flamegraph' > /tmp/pprof.svg
```
Sample at 49 Hertz, for 10 seconds, output report in text format.
```bash
curl -s '0:4000/v1/prof/cpu?seconds=10&frequency=49&output=text' > /tmp/pprof.txt
```

src/common/pprof/src/lib.rs Normal file
View File

@@ -0,0 +1,124 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::time::Duration;
use common_error::prelude::{ErrorExt, StatusCode};
use prost::Message;
use snafu::{Location, ResultExt, Snafu};
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display(
"Failed to create profiler guard, source: {}, location: {}",
source,
location
))]
CreateGuard {
source: pprof::Error,
location: Location,
},
#[snafu(display("Failed to create report, source: {}, location: {}", source, location))]
CreateReport {
source: pprof::Error,
location: Location,
},
#[snafu(display(
"Failed to create flamegraph, source: {}, location: {}",
source,
location
))]
CreateFlamegraph {
source: pprof::Error,
location: Location,
},
#[snafu(display(
"Failed to create pprof report, source: {}, location: {}",
source,
location
))]
ReportPprof {
source: pprof::Error,
location: Location,
},
}
pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
StatusCode::Unexpected
}
fn as_any(&self) -> &dyn Any {
self
}
}
/// CPU profiler utility.
// Inspired by https://github.com/datafuselabs/databend/blob/67f445e83cd4eceda98f6c1c114858929d564029/src/common/base/src/base/profiling.rs
#[derive(Debug)]
pub struct Profiling {
/// Sample duration.
duration: Duration,
/// Sample frequency.
frequency: i32,
}
impl Profiling {
/// Creates a new profiler.
pub fn new(duration: Duration, frequency: i32) -> Profiling {
Profiling {
duration,
frequency,
}
}
/// Profiles and returns a generated pprof report.
pub async fn report(&self) -> Result<pprof::Report> {
let guard = pprof::ProfilerGuardBuilder::default()
.frequency(self.frequency)
.blocklist(&["libc", "libgcc", "pthread", "vdso"])
.build()
.context(CreateGuardSnafu)?;
tokio::time::sleep(self.duration).await;
guard.report().build().context(CreateReportSnafu)
}
/// Profiles and returns a generated flamegraph.
pub async fn dump_flamegraph(&self) -> Result<Vec<u8>> {
let mut body: Vec<u8> = Vec::new();
let report = self.report().await?;
report
.flamegraph(&mut body)
.context(CreateFlamegraphSnafu)?;
Ok(body)
}
/// Profiles and returns a generated proto.
pub async fn dump_proto(&self) -> Result<Vec<u8>> {
let report = self.report().await?;
// Generate Google's pprof-format report.
let profile = report.pprof().context(ReportPprofSnafu)?;
let body = profile.encode_to_vec();
Ok(body)
}
}
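For orientation, a minimal usage sketch of the utility above, assuming it is awaited from an async server handler; the 5 s / 99 Hz parameters simply mirror the README defaults and everything else is illustrative:

```rust
use std::time::Duration;

use common_pprof::{Profiling, Result};

/// Hedged sketch only: collect a 5-second CPU profile at 99 Hz and return the
/// rendered flamegraph SVG bytes.
async fn cpu_flamegraph() -> Result<Vec<u8>> {
    let profiling = Profiling::new(Duration::from_secs(5), 99);
    profiling.dump_flamegraph().await
}
```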

View File

@@ -29,10 +29,7 @@ pub enum Error {
"Failed to execute procedure due to external error, source: {}",
source
))]
External {
#[snafu(backtrace)]
source: BoxedError,
},
External { source: BoxedError },
#[snafu(display("Loader {} is already registered", name))]
LoaderConflict { name: String, location: Location },
@@ -52,7 +49,7 @@ pub enum Error {
#[snafu(display("Failed to put state, key: '{key}', source: {source}"))]
PutState {
key: String,
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
@@ -65,14 +62,14 @@ pub enum Error {
#[snafu(display("Failed to delete keys: '{keys}', source: {source}"))]
DeleteStates {
keys: String,
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to list state, path: '{path}', source: {source}"))]
ListState {
path: String,
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
@@ -83,10 +80,7 @@ pub enum Error {
},
#[snafu(display("Procedure exec failed, source: {}", source))]
RetryLater {
#[snafu(backtrace)]
source: BoxedError,
},
RetryLater { source: BoxedError },
#[snafu(display("Procedure panics, procedure_id: {}", procedure_id))]
ProcedurePanic { procedure_id: ProcedureId },

View File

@@ -40,7 +40,7 @@ pub enum Error {
source
))]
UdfTempRecordBatch {
#[snafu(backtrace)]
location: Location,
source: RecordbatchError,
},
@@ -65,19 +65,19 @@ pub enum Error {
#[snafu(display("Fail to cast scalar value into vector: {}", source))]
FromScalarValue {
#[snafu(backtrace)]
location: Location,
source: DataTypeError,
},
#[snafu(display("Fail to cast arrow array into vector: {}", source))]
FromArrowArray {
#[snafu(backtrace)]
location: Location,
source: DataTypeError,
},
#[snafu(display("Fail to cast arrow array into vector: {:?}, {}", data_type, source))]
IntoVector {
#[snafu(backtrace)]
location: Location,
source: DataTypeError,
data_type: ArrowDatatype,
},
@@ -93,7 +93,7 @@ pub enum Error {
#[snafu(display("Invalid input type: {}", err_msg))]
InvalidInputType {
#[snafu(backtrace)]
location: Location,
source: DataTypeError,
err_msg: String,
},
@@ -120,19 +120,19 @@ pub enum Error {
source
))]
ConvertDfRecordBatchStream {
#[snafu(backtrace)]
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to convert arrow schema, source: {}", source))]
ConvertArrowSchema {
#[snafu(backtrace)]
location: Location,
source: DataTypeError,
},
#[snafu(display("Failed to execute physical plan, source: {}", source))]
ExecutePhysicalPlan {
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
@@ -154,13 +154,13 @@ pub enum Error {
#[snafu(display("Query engine fail to cast value: {}", source))]
ToScalarValue {
#[snafu(backtrace)]
location: Location,
source: DataTypeError,
},
#[snafu(display("Failed to get scalar vector, {}", source))]
GetScalarVector {
#[snafu(backtrace)]
location: Location,
source: DataTypeError,
},
@@ -188,9 +188,9 @@ impl ErrorExt for Error {
Error::InvalidInputType { source, .. }
| Error::IntoVector { source, .. }
| Error::FromScalarValue { source }
| Error::ConvertArrowSchema { source }
| Error::FromArrowArray { source } => source.status_code(),
| Error::FromScalarValue { source, .. }
| Error::ConvertArrowSchema { source, .. }
| Error::FromArrowArray { source, .. } => source.status_code(),
Error::ExecuteRepeatedly { .. } | Error::GeneralDataFusion { .. } => {
StatusCode::Unexpected
@@ -201,7 +201,7 @@ impl ErrorExt for Error {
| Error::InvalidFuncArgs { .. } => StatusCode::InvalidArguments,
Error::ConvertDfRecordBatchStream { source, .. } => source.status_code(),
Error::ExecutePhysicalPlan { source } => source.status_code(),
Error::ExecutePhysicalPlan { source, .. } => source.status_code(),
}
}
@@ -215,9 +215,3 @@ impl From<Error> for DataFusionError {
DataFusionError::External(Box::new(e))
}
}
impl From<BoxedError> for Error {
fn from(source: BoxedError) -> Self {
Error::ExecutePhysicalPlan { source }
}
}

View File

@@ -172,7 +172,6 @@ impl DfAccumulator for DfAccumulatorAdaptor {
}
fn size(&self) -> usize {
// TODO(LFC): Implement new "size" method for Accumulator.
0
}
}

View File

@@ -194,7 +194,6 @@ impl DfPhysicalPlan for DfPhysicalPlanAdapter {
}
fn statistics(&self) -> Statistics {
// TODO(LFC): impl statistics
Statistics::default()
}
}

View File

@@ -225,6 +225,7 @@ mod test {
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;
use datatypes::vectors::Int32Vector;
use snafu::IntoError;
use super::*;
use crate::RecordBatches;
@@ -296,9 +297,8 @@ mod test {
let poll_err_stream = new_future_stream(Ok(vec![
Ok(batch1.clone()),
Err(error::Error::External {
source: BoxedError::new(MockError::new(StatusCode::Unknown)),
}),
Err(error::ExternalSnafu
.into_error(BoxedError::new(MockError::new(StatusCode::Unknown)))),
]));
let adapter = AsyncRecordBatchStreamAdapter::new(schema.clone(), poll_err_stream);
let result = RecordBatches::try_collect(Box::pin(adapter)).await;
@@ -307,9 +307,9 @@ mod test {
"Failed to poll stream, source: External error: External error, source: Unknown"
);
let failed_to_init_stream = new_future_stream(Err(error::Error::External {
source: BoxedError::new(MockError::new(StatusCode::Internal)),
}));
let failed_to_init_stream =
new_future_stream(Err(error::ExternalSnafu
.into_error(BoxedError::new(MockError::new(StatusCode::Internal)))));
let adapter = AsyncRecordBatchStreamAdapter::new(schema.clone(), failed_to_init_stream);
let result = RecordBatches::try_collect(Box::pin(adapter)).await;
assert_eq!(

View File

@@ -33,13 +33,13 @@ pub enum Error {
#[snafu(display("Data types error, source: {}", source))]
DataTypes {
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},
#[snafu(display("External error, source: {}", source))]
External {
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
@@ -99,7 +99,7 @@ pub enum Error {
CastVector {
from_type: ConcreteDataType,
to_type: ConcreteDataType,
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},
}
@@ -117,7 +117,7 @@ impl ErrorExt for Error {
| Error::ColumnNotExists { .. }
| Error::ProjectArrowRecordBatch { .. } => StatusCode::Internal,
Error::External { source } => source.status_code(),
Error::External { source, .. } => source.status_code(),
Error::SchemaConversion { source, .. } | Error::CastVector { source, .. } => {
source.status_code()

View File

@@ -74,7 +74,7 @@ pub enum Error {
#[snafu(display("Internal error: {}", source))]
Internal {
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
@@ -96,14 +96,14 @@ pub enum Error {
#[snafu(display("Failed to convert DataFusion schema, source: {}", source))]
ConvertDfSchema {
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},
#[snafu(display("Unable to resolve table: {table_name}, error: {source}"))]
ResolveTable {
table_name: String,
#[snafu(backtrace)]
location: Location,
source: catalog::error::Error,
},
@@ -141,7 +141,7 @@ impl ErrorExt for Error {
| Error::Internal { .. }
| Error::EncodeDfPlan { .. }
| Error::DecodeDfPlan { .. } => StatusCode::Internal,
Error::ConvertDfSchema { source } => source.status_code(),
Error::ConvertDfSchema { source, .. } => source.status_code(),
Error::ResolveTable { source, .. } => source.status_code(),
}
}

View File

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::{Debug, Display, Formatter};
use crate::timestamp::TimeUnit;
use crate::timestamp_millis::TimestampMillis;
use crate::Timestamp;
@@ -193,6 +195,38 @@ impl<T: PartialOrd> GenericRange<T> {
pub type TimestampRange = GenericRange<Timestamp>;
impl Display for TimestampRange {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let s = match (&self.start, &self.end) {
(Some(start), Some(end)) => {
format!(
"TimestampRange{{[{}{},{}{})}}",
start.value(),
start.unit().short_name(),
end.value(),
end.unit().short_name()
)
}
(Some(start), None) => {
format!(
"TimestampRange{{[{}{},#)}}",
start.value(),
start.unit().short_name()
)
}
(None, Some(end)) => {
format!(
"TimestampRange{{[#,{}{})}}",
end.value(),
end.unit().short_name()
)
}
(None, None) => "TimestampRange{[#,#)}".to_string(),
};
f.write_str(&s)
}
}
impl TimestampRange {
/// Create a TimestampRange with optional inclusive end timestamp.
/// If end timestamp is present and is less than start timestamp, this method will return

View File

@@ -336,6 +336,15 @@ impl TimeUnit {
TimeUnit::Nanosecond => 1,
}
}
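/// Compact unit suffix ("s", "ms", "us", "ns") used by `TimestampRange`'s
/// `Display` implementation.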
pub(crate) fn short_name(&self) -> &'static str {
match self {
TimeUnit::Second => "s",
TimeUnit::Millisecond => "ms",
TimeUnit::Microsecond => "us",
TimeUnit::Nanosecond => "ns",
}
}
}
impl PartialOrd for Timestamp {

View File

@@ -221,7 +221,7 @@ impl Default for RegionManifestConfig {
fn default() -> Self {
Self {
checkpoint_margin: Some(10u16),
gc_duration: Some(Duration::from_secs(30)),
gc_duration: Some(Duration::from_secs(600)),
checkpoint_on_startup: false,
compress: false,
}
@@ -340,8 +340,6 @@ pub struct DatanodeOptions {
pub rpc_addr: String,
pub rpc_hostname: Option<String>,
pub rpc_runtime_size: usize,
pub mysql_addr: String,
pub mysql_runtime_size: usize,
pub http_opts: HttpOptions,
pub meta_client_options: Option<MetaClientOptions>,
pub wal: WalConfig,
@@ -359,8 +357,6 @@ impl Default for DatanodeOptions {
rpc_addr: "127.0.0.1:3001".to_string(),
rpc_hostname: None,
rpc_runtime_size: 8,
mysql_addr: "127.0.0.1:4406".to_string(),
mysql_runtime_size: 2,
http_opts: HttpOptions::default(),
meta_client_options: None,
wal: WalConfig::default(),

View File

@@ -27,14 +27,14 @@ use table::error::Error as TableError;
pub enum Error {
#[snafu(display("Failed to access catalog, source: {}", source))]
AccessCatalog {
#[snafu(backtrace)]
location: Location,
source: catalog::error::Error,
},
#[snafu(display("Failed to deregister table: {}, source: {}", table_name, source))]
DeregisterTable {
table_name: String,
#[snafu(backtrace)]
location: Location,
source: catalog::error::Error,
},
@@ -48,7 +48,7 @@ pub enum Error {
#[snafu(display("Failed to open table: {}, source: {}", table_name, source))]
OpenTable {
table_name: String,
#[snafu(backtrace)]
location: Location,
source: TableError,
},
@@ -68,7 +68,7 @@ pub enum Error {
CloseTable {
table_name: String,
region_numbers: Vec<RegionNumber>,
#[snafu(backtrace)]
location: Location,
source: TableError,
},
@@ -93,31 +93,31 @@ pub enum Error {
#[snafu(display("Failed to execute sql, source: {}", source))]
ExecuteSql {
#[snafu(backtrace)]
location: Location,
source: query::error::Error,
},
#[snafu(display("Failed to plan statement, source: {}", source))]
PlanStatement {
#[snafu(backtrace)]
location: Location,
source: query::error::Error,
},
#[snafu(display("Failed to execute statement, source: {}", source))]
ExecuteStatement {
#[snafu(backtrace)]
location: Location,
source: query::error::Error,
},
#[snafu(display("Failed to execute logical plan, source: {}", source))]
ExecuteLogicalPlan {
#[snafu(backtrace)]
location: Location,
source: query::error::Error,
},
#[snafu(display("Failed to decode logical plan, source: {}", source))]
DecodeLogicalPlan {
#[snafu(backtrace)]
location: Location,
source: substrait::error::Error,
},
@@ -126,7 +126,7 @@ pub enum Error {
#[snafu(display("Failed to create catalog list, source: {}", source))]
NewCatalog {
#[snafu(backtrace)]
location: Location,
source: catalog::error::Error,
},
@@ -139,21 +139,21 @@ pub enum Error {
#[snafu(display("Failed to create table: {}, source: {}", table_name, source))]
CreateTable {
table_name: String,
#[snafu(backtrace)]
location: Location,
source: TableError,
},
#[snafu(display("Failed to drop table {}, source: {}", table_name, source))]
DropTable {
table_name: String,
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
#[snafu(display("Table engine not found: {}, source: {}", engine_name, source))]
TableEngineNotFound {
engine_name: String,
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
@@ -164,7 +164,7 @@ pub enum Error {
))]
EngineProcedureNotFound {
engine_name: String,
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
@@ -192,7 +192,7 @@ pub enum Error {
#[snafu(display("Failed to parse sql value, source: {}", source))]
ParseSqlValue {
#[snafu(backtrace)]
location: Location,
source: sql::error::Error,
},
@@ -202,7 +202,7 @@ pub enum Error {
#[snafu(display("Failed to insert value to table: {}, source: {}", table_name, source))]
Insert {
table_name: String,
#[snafu(backtrace)]
location: Location,
source: TableError,
},
@@ -213,20 +213,20 @@ pub enum Error {
))]
Delete {
table_name: String,
#[snafu(backtrace)]
location: Location,
source: TableError,
},
#[snafu(display("Failed to flush table: {}, source: {}", table_name, source))]
FlushTable {
table_name: String,
#[snafu(backtrace)]
location: Location,
source: TableError,
},
#[snafu(display("Failed to start server, source: {}", source))]
StartServer {
#[snafu(backtrace)]
location: Location,
source: servers::error::Error,
},
@@ -250,8 +250,8 @@ pub enum Error {
#[snafu(display("Failed to open log store, source: {}", source))]
OpenLogStore {
#[snafu(backtrace)]
source: log_store::error::Error,
location: Location,
source: Box<log_store::error::Error>,
},
#[snafu(display("Failed to init backend, source: {}", source))]
@@ -262,7 +262,7 @@ pub enum Error {
#[snafu(display("Runtime resource error, source: {}", source))]
RuntimeResource {
#[snafu(backtrace)]
location: Location,
source: common_runtime::error::Error,
},
@@ -289,7 +289,7 @@ pub enum Error {
#[snafu(display("Failed to register a new schema, source: {}", source))]
RegisterSchema {
#[snafu(backtrace)]
location: Location,
source: catalog::error::Error,
},
@@ -298,25 +298,25 @@ pub enum Error {
#[snafu(display("Failed to convert alter expr to request: {}", source))]
AlterExprToRequest {
#[snafu(backtrace)]
location: Location,
source: common_grpc_expr::error::Error,
},
#[snafu(display("Failed to convert create expr to request: {}", source))]
CreateExprToRequest {
#[snafu(backtrace)]
location: Location,
source: common_grpc_expr::error::Error,
},
#[snafu(display("Failed to convert delete expr to request: {}", source))]
DeleteExprToRequest {
#[snafu(backtrace)]
location: Location,
source: common_grpc_expr::error::Error,
},
#[snafu(display("Failed to parse SQL, source: {}", source))]
ParseSql {
#[snafu(backtrace)]
location: Location,
source: sql::error::Error,
},
@@ -327,38 +327,38 @@ pub enum Error {
))]
ParseTimestamp {
raw: String,
#[snafu(backtrace)]
location: Location,
source: common_time::error::Error,
},
#[snafu(display("Failed to prepare immutable table: {}", source))]
PrepareImmutableTable {
#[snafu(backtrace)]
location: Location,
source: query::error::Error,
},
#[snafu(display("Failed to access catalog, source: {}", source))]
Catalog {
#[snafu(backtrace)]
location: Location,
source: catalog::error::Error,
},
#[snafu(display("Failed to find table {} from catalog, source: {}", table_name, source))]
FindTable {
table_name: String,
#[snafu(backtrace)]
location: Location,
source: catalog::error::Error,
},
#[snafu(display("Failed to initialize meta client, source: {}", source))]
MetaClientInit {
#[snafu(backtrace)]
location: Location,
source: meta_client::error::Error,
},
#[snafu(display("Failed to insert data, source: {}", source))]
InsertData {
#[snafu(backtrace)]
location: Location,
source: common_grpc_expr::error::Error,
},
@@ -369,7 +369,7 @@ pub enum Error {
#[snafu(display("Failed to bump table id, source: {}", source))]
BumpTableId {
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
@@ -392,7 +392,7 @@ pub enum Error {
))]
ColumnDefaultValue {
column: String,
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},
@@ -404,45 +404,45 @@ pub enum Error {
#[snafu(display("Unrecognized table option: {}", source))]
UnrecognizedTableOption {
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to recover procedure, source: {}", source))]
RecoverProcedure {
#[snafu(backtrace)]
location: Location,
source: common_procedure::error::Error,
},
#[snafu(display("Failed to submit procedure {}, source: {}", procedure_id, source))]
SubmitProcedure {
procedure_id: ProcedureId,
#[snafu(backtrace)]
location: Location,
source: common_procedure::error::Error,
},
#[snafu(display("Failed to wait procedure {} done, source: {}", procedure_id, source))]
WaitProcedure {
procedure_id: ProcedureId,
#[snafu(backtrace)]
location: Location,
source: common_procedure::error::Error,
},
#[snafu(display("Failed to close table engine, source: {}", source))]
CloseTableEngine {
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to shutdown server, source: {}", source))]
ShutdownServer {
#[snafu(backtrace)]
location: Location,
source: servers::error::Error,
},
#[snafu(display("Failed to shutdown instance, source: {}", source))]
ShutdownInstance {
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
@@ -487,15 +487,15 @@ impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
use Error::*;
match self {
ExecuteSql { source }
| PlanStatement { source }
| ExecuteStatement { source }
| ExecuteLogicalPlan { source } => source.status_code(),
ExecuteSql { source, .. }
| PlanStatement { source, .. }
| ExecuteStatement { source, .. }
| ExecuteLogicalPlan { source, .. } => source.status_code(),
HandleHeartbeatResponse { source, .. } => source.status_code(),
DecodeLogicalPlan { source } => source.status_code(),
NewCatalog { source } | RegisterSchema { source } => source.status_code(),
DecodeLogicalPlan { source, .. } => source.status_code(),
NewCatalog { source, .. } | RegisterSchema { source, .. } => source.status_code(),
FindTable { source, .. } => source.status_code(),
CreateTable { source, .. } => source.status_code(),
DropTable { source, .. } => source.status_code(),
@@ -512,9 +512,9 @@ impl ErrorExt for Error {
ParseSqlValue { source, .. } | ParseSql { source, .. } => source.status_code(),
AlterExprToRequest { source, .. }
| CreateExprToRequest { source }
| DeleteExprToRequest { source }
| InsertData { source } => source.status_code(),
| CreateExprToRequest { source, .. }
| DeleteExprToRequest { source, .. }
| InsertData { source, .. } => source.status_code(),
ColumnValuesNumberMismatch { .. }
| InvalidSql { .. }
@@ -559,13 +559,13 @@ impl ErrorExt for Error {
| CloseTableEngine { .. }
| JoinTask { .. } => StatusCode::Internal,
StartServer { source }
| ShutdownServer { source }
StartServer { source, .. }
| ShutdownServer { source, .. }
| WaitForGrpcServing { source, .. } => source.status_code(),
InitBackend { .. } => StatusCode::StorageUnavailable,
OpenLogStore { source } => source.status_code(),
OpenLogStore { source, .. } => source.status_code(),
RuntimeResource { .. } => StatusCode::RuntimeResourcesExhausted,
MetaClientInit { source, .. } => source.status_code(),
TableIdProviderNotFound { .. } => StatusCode::Unsupported,

View File

@@ -23,7 +23,7 @@ use common_meta::heartbeat::handler::{
};
use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MailboxRef};
use common_meta::heartbeat::utils::outgoing_message_to_mailbox_message;
use common_telemetry::{error, info, trace, warn};
use common_telemetry::{debug, error, info, trace, warn};
use meta_client::client::{HeartbeatSender, MetaClient};
use snafu::ResultExt;
use tokio::sync::mpsc;
@@ -199,6 +199,7 @@ impl HeartbeatTask {
}
};
if let Some(req) = req {
debug!("Sending heartbeat request: {:?}", req);
if let Err(e) = tx.send(req).await {
error!("Failed to send heartbeat to metasrv, error: {:?}", e);
match Self::create_streams(

View File

@@ -421,6 +421,7 @@ pub(crate) async fn create_log_store(
let logstore = RaftEngineLogStore::try_new(log_config)
.await
.map_err(Box::new)
.context(OpenLogStoreSnafu)?;
Ok(logstore)
}

View File

@@ -113,6 +113,14 @@ impl Vector for BinaryVector {
}
}
impl From<Vec<Vec<u8>>> for BinaryVector {
fn from(data: Vec<Vec<u8>>) -> Self {
Self {
array: BinaryArray::from_iter_values(data),
}
}
}
impl ScalarVector for BinaryVector {
type OwnedItem = Vec<u8>;
type RefItem<'a> = &'a [u8];

View File

@@ -130,6 +130,12 @@ impl<T: LogicalPrimitiveType> PrimitiveVector<T> {
}
}
pub fn from_iter_values<I: IntoIterator<Item = T::Native>>(iter: I) -> Self {
Self {
array: PrimitiveArray::from_iter_values(iter),
}
}
pub fn from_values<I: IntoIterator<Item = T::Native>>(iter: I) -> Self {
Self {
array: PrimitiveArray::from_iter_values(iter),

View File

@@ -115,13 +115,13 @@ pub enum Error {
source
))]
ConvertRaw {
#[snafu(backtrace)]
location: Location,
source: table::metadata::ConvertError,
},
#[snafu(display("Invalid schema, source: {}", source))]
InvalidRawSchema {
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},
@@ -130,7 +130,7 @@ pub enum Error {
#[snafu(display("Failed to build backend, source: {}", source))]
BuildBackend {
#[snafu(backtrace)]
location: Location,
source: common_datasource::error::Error,
},
@@ -154,13 +154,13 @@ pub enum Error {
#[snafu(display("Failed to build stream adapter: {}", source))]
BuildStreamAdapter {
#[snafu(backtrace)]
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to parse file format: {}", source))]
ParseFileFormat {
#[snafu(backtrace)]
location: Location,
source: common_datasource::error::Error,
},

View File

@@ -20,8 +20,7 @@ use common_meta::heartbeat::handler::{
};
use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MailboxRef, OutgoingMessage};
use common_meta::heartbeat::utils::outgoing_message_to_mailbox_message;
use common_telemetry::tracing::trace;
use common_telemetry::{error, info};
use common_telemetry::{debug, error, info};
use meta_client::client::{HeartbeatSender, HeartbeatStream, MetaClient};
use snafu::ResultExt;
use tokio::sync::mpsc;
@@ -83,7 +82,7 @@ impl HeartbeatTask {
loop {
match resp_stream.message().await {
Ok(Some(resp)) => {
trace!("Received a heartbeat response: {:?}", resp);
debug!("Receiving heartbeat response: {:?}", resp);
let ctx = HeartbeatResponseHandlerContext::new(mailbox.clone(), resp);
if let Err(e) = capture_self.handle_response(ctx) {
error!(e; "Error while handling heartbeat response");
@@ -92,7 +91,6 @@ impl HeartbeatTask {
Ok(None) => break,
Err(e) => {
error!(e; "Occur error while reading heartbeat response");
capture_self
.start_with_retry(Duration::from_secs(retry_interval))
.await;
@@ -148,7 +146,7 @@ impl HeartbeatTask {
error!(e; "Failed to send heartbeat to metasrv");
break;
} else {
trace!("Send a heartbeat request to metasrv, content: {:?}", req);
debug!("Send a heartbeat request to metasrv, content: {:?}", req);
}
}
}

View File

@@ -285,9 +285,6 @@ impl Instance {
requests: InsertRequests,
ctx: QueryContextRef,
) -> Result<Output> {
// TODO(LFC): Optimize concurrent table creation and table alteration.
// Currently table creation is guarded by a distributed lock in Metasrv. However, table
// alteration is not. We should all switch to procedures in Metasrv.
let _ = future::join_all(
requests
.inserts
@@ -563,6 +560,7 @@ impl PromHandler for Instance {
let stmt = QueryLanguageParser::parse_promql(query).with_context(|_| ParsePromQLSnafu {
query: query.clone(),
})?;
self.statement_executor
.execute_stmt(stmt, query_ctx)
.await

View File

@@ -598,7 +598,6 @@ impl DistInstance {
Ok(Output::AffectedRows(affected_rows as usize))
}
// TODO(LFC): Like insertions above, refactor GRPC deletion impl here.
async fn handle_dist_delete(
&self,
request: DeleteRequest,
@@ -662,8 +661,6 @@ impl GrpcQueryHandler for DistInstance {
match expr {
DdlExpr::CreateDatabase(expr) => self.handle_create_database(expr, ctx).await,
DdlExpr::CreateTable(mut expr) => {
// TODO(LFC): Support creating distributed table through GRPC interface.
// Currently only SQL supports it; how to design the fields in CreateTableExpr?
let _ = self.create_table(&mut expr, None).await;
Ok(Output::AffectedRows(0))
}

View File

@@ -23,13 +23,13 @@ use snafu::Location;
pub enum Error {
#[snafu(display("Failed to start log store gc task, source: {}", source))]
StartGcTask {
#[snafu(backtrace)]
location: Location,
source: RuntimeError,
},
#[snafu(display("Failed to stop log store gc task, source: {}", source))]
StopGcTask {
#[snafu(backtrace)]
location: Location,
source: RuntimeError,
},

View File

@@ -35,7 +35,7 @@ pub enum Error {
#[snafu(display("Failed to create gRPC channel, source: {}", source))]
CreateChannel {
#[snafu(backtrace)]
location: Location,
source: common_grpc::error::Error,
},
@@ -50,19 +50,19 @@ pub enum Error {
#[snafu(display("Invalid response header, source: {}", source))]
InvalidResponseHeader {
#[snafu(backtrace)]
location: Location,
source: common_meta::error::Error,
},
#[snafu(display("Failed to convert Metasrv request, source: {}", source))]
ConvertMetaRequest {
#[snafu(backtrace)]
location: Location,
source: common_meta::error::Error,
},
#[snafu(display("Failed to convert Metasrv response, source: {}", source))]
ConvertMetaResponse {
#[snafu(backtrace)]
location: Location,
source: common_meta::error::Error,
},
}
@@ -86,9 +86,9 @@ impl ErrorExt for Error {
| Error::CreateHeartbeatStream { .. }
| Error::CreateChannel { .. } => StatusCode::Internal,
Error::InvalidResponseHeader { source }
| Error::ConvertMetaRequest { source }
| Error::ConvertMetaResponse { source } => source.status_code(),
Error::InvalidResponseHeader { source, .. }
| Error::ConvertMetaRequest { source, .. }
| Error::ConvertMetaResponse { source, .. } => source.status_code(),
}
}
}

View File

@@ -26,7 +26,7 @@ pub enum Error {
#[snafu(display("Failed to shutdown {} server, source: {}", server, source))]
ShutdownServer {
#[snafu(backtrace)]
location: Location,
source: servers::error::Error,
server: String,
},
@@ -60,7 +60,7 @@ pub enum Error {
},
#[snafu(display("Failed to start http server, source: {}", source))]
StartHttp {
#[snafu(backtrace)]
location: Location,
source: servers::error::Error,
},
#[snafu(display("Failed to parse address {}, source: {}", addr, source))]
@@ -130,7 +130,7 @@ pub enum Error {
#[snafu(display("Cannot parse catalog value, source: {}", source))]
InvalidCatalogValue {
#[snafu(backtrace)]
location: Location,
source: common_catalog::error::Error,
},
@@ -190,7 +190,7 @@ pub enum Error {
#[snafu(display("Failed to create gRPC channel, source: {}", source))]
CreateChannel {
#[snafu(backtrace)]
location: Location,
source: common_grpc::error::Error,
},
@@ -273,7 +273,7 @@ pub enum Error {
#[snafu(display("Failed to recover procedure, source: {source}"))]
RecoverProcedure {
#[snafu(backtrace)]
location: Location,
source: common_procedure::Error,
},
@@ -321,7 +321,7 @@ pub enum Error {
))]
RegisterProcedureLoader {
type_name: String,
#[snafu(backtrace)]
location: Location,
source: common_procedure::error::Error,
},
@@ -350,7 +350,7 @@ pub enum Error {
#[snafu(display("Failed to convert table route, source: {}", source))]
TableRouteConversion {
#[snafu(backtrace)]
location: Location,
source: common_meta::error::Error,
},
@@ -434,15 +434,15 @@ impl ErrorExt for Error {
| Error::Unexpected { .. } => StatusCode::Unexpected,
Error::TableNotFound { .. } => StatusCode::TableNotFound,
Error::InvalidCatalogValue { source, .. } => source.status_code(),
Error::RecoverProcedure { source } => source.status_code(),
Error::ShutdownServer { source, .. } | Error::StartHttp { source } => {
Error::RecoverProcedure { source, .. } => source.status_code(),
Error::ShutdownServer { source, .. } | Error::StartHttp { source, .. } => {
source.status_code()
}
Error::RegionFailoverCandidatesNotFound { .. } => StatusCode::RuntimeResourcesExhausted,
Error::RegisterProcedureLoader { source, .. } => source.status_code(),
Error::TableRouteConversion { source } => source.status_code(),
Error::TableRouteConversion { source, .. } => source.status_code(),
Error::Other { source, .. } => source.status_code(),
}
}

View File

@@ -74,12 +74,6 @@ impl HeartbeatHandler for RegionFailureHandler {
let Some(stat) = acc.stat.as_ref() else { return Ok(()) };
// TODO(LFC): Filter out the stalled heartbeats:
// After the region failover is done, the distribution of region is changed.
// We can compare the heartbeat info here with the global region placement metadata,
// and remove the incorrect region ident keys in failure detect runner
// (by sending a control message).
let heartbeat = DatanodeHeartbeat {
region_idents: stat
.region_stats

View File

@@ -146,25 +146,21 @@ impl MetaSrv {
common_runtime::spawn_bg(async move {
loop {
match rx.recv().await {
Ok(msg) => {
match msg {
LeaderChangeMessage::Elected(_) => {
if let Err(e) = procedure_manager.recover().await {
error!("Failed to recover procedures, error: {e}");
}
}
LeaderChangeMessage::StepDown(leader) => {
// TODO(LFC): TBC
error!("Leader :{:?} step down", leader);
Ok(msg) => match msg {
LeaderChangeMessage::Elected(_) => {
if let Err(e) = procedure_manager.recover().await {
error!("Failed to recover procedures, error: {e}");
}
}
}
LeaderChangeMessage::StepDown(leader) => {
error!("Leader :{:?} step down", leader);
}
},
Err(RecvError::Closed) => {
error!("Not expected, is leader election loop still running?");
break;
}
Err(RecvError::Lagged(_)) => {
// TODO(LFC): TBC
break;
}
}

View File

@@ -43,16 +43,6 @@ impl UpdateRegionMetadata {
Self { candidate }
}
// TODO(LFC): Update the two table metadata values in a batch atomically.
//
// Though the updating of the two metadata values is guarded by a distributed lock,
// it is not robust enough. For example, the lock lease could expire in the middle of
// one's update, letting others start updating concurrently. For now, we set the lease of
// the distributed lock to 10 seconds, which is long enough here to get the job done.
//
// Maybe we should introduce "version" companion values to these two metadata values, and
// use ETCD transaction request to update them?
/// Updates the metadata of the table. Specifically, the [TableGlobalValue] and [TableRouteValue].
async fn update_metadata(
&self,

View File

@@ -16,6 +16,7 @@ mod health;
mod heartbeat;
mod leader;
mod meta;
mod route;
use std::collections::HashMap;
use std::convert::Infallible;
@@ -73,6 +74,13 @@ pub fn make_admin_service(meta_srv: MetaSrv) -> Admin {
},
);
let router = router.route(
"/route",
route::RouteHandler {
kv_store: meta_srv.kv_store(),
},
);
let router = Router::nest("/admin", router);
Admin::new(router)

View File

@@ -0,0 +1,86 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use api::v1::meta::{RangeRequest, RangeResponse, TableRouteValue};
use common_meta::key::TABLE_ROUTE_PREFIX;
use prost::Message;
use snafu::{OptionExt, ResultExt};
use tonic::codegen::http;
use super::HttpHandler;
use crate::error::Result;
use crate::service::store::kv::KvStoreRef;
use crate::{error, util};
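/// Admin HTTP handler that resolves a table's route: it range-scans the meta
/// kv store by the `full_table_name` query parameter and pretty-prints the
/// decoded `TableRouteValue`s.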
pub struct RouteHandler {
pub kv_store: KvStoreRef,
}
#[async_trait::async_trait]
impl HttpHandler for RouteHandler {
async fn handle(
&self,
_path: &str,
params: &HashMap<String, String>,
) -> Result<http::Response<String>> {
let full_table_name = params
.get("full_table_name")
.map(|full_table_name| full_table_name.replace('.', "-"))
.context(error::MissingRequiredParameterSnafu {
param: "full_table_name",
})?;
let route_key = format!("{}-{}", TABLE_ROUTE_PREFIX, full_table_name).into_bytes();
let range_end = util::get_prefix_end_key(&route_key);
let req = RangeRequest {
key: route_key,
range_end,
keys_only: false,
..Default::default()
};
let resp = self.kv_store.range(req).await?;
let show = pretty_fmt(resp)?;
http::Response::builder()
.status(http::StatusCode::OK)
.body(show)
.context(error::InvalidHttpBodySnafu)
}
}
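/// Renders every route key/value pair returned by the range scan as a
/// human-readable text block for the admin HTTP response body.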
fn pretty_fmt(response: RangeResponse) -> Result<String> {
let mut show = "".to_string();
for kv in response.kvs.into_iter() {
let route_key = String::from_utf8(kv.key).unwrap();
let route_val =
TableRouteValue::decode(&kv.value[..]).context(error::DecodeTableRouteSnafu)?;
show.push_str("route_key:\n");
show.push_str(&route_key);
show.push('\n');
show.push_str("route_value:\n");
show.push_str(&format!("{:#?}", route_val));
show.push('\n');
}
Ok(show)
}

View File

@@ -20,7 +20,7 @@ use api::v1::meta::{
heartbeat_server, AskLeaderRequest, AskLeaderResponse, HeartbeatRequest, HeartbeatResponse,
Peer, RequestHeader, ResponseHeader, Role,
};
use common_telemetry::{error, info, warn};
use common_telemetry::{debug, error, info, warn};
use futures::StreamExt;
use once_cell::sync::OnceCell;
use tokio::sync::mpsc;
@@ -59,6 +59,7 @@ impl heartbeat_server::Heartbeat for MetaSrv {
break;
}
};
debug!("Receiving heartbeat request: {:?}", req);
if pusher_key.is_none() {
let node_id = get_node_id(header);
@@ -76,6 +77,7 @@ impl heartbeat_server::Heartbeat for MetaSrv {
is_not_leader = res.as_ref().map_or(false, |r| r.is_not_leader());
debug!("Sending heartbeat response: {:?}", res);
tx.send(res).await.expect("working rx");
}
Err(err) => {

View File

@@ -21,6 +21,7 @@ common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-datasource = { path = "../common/datasource" }
common-telemetry = { path = "../common/telemetry" }
common-test-util = { path = "../common/test-util", optional = true }
common-time = { path = "../common/time" }
dashmap = "5.4"
datafusion.workspace = true
@@ -29,6 +30,7 @@ datatypes = { path = "../datatypes" }
futures.workspace = true
key-lock = "0.1"
log-store = { path = "../log-store" }
metrics.workspace = true
object-store = { path = "../object-store" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
@@ -36,7 +38,6 @@ snafu.workspace = true
storage = { path = "../storage" }
store-api = { path = "../store-api" }
table = { path = "../table" }
common-test-util = { path = "../common/test-util", optional = true }
tokio.workspace = true
[dev-dependencies]

View File

@@ -295,7 +295,7 @@ fn build_row_key_desc(
let column_schemas = &table_schema.column_schemas();
//TODO(boyan): enable version column by table option?
//TODO(dennis): enable version column by table option?
let mut builder = RowKeyDescriptorBuilder::new(ts_column);
for index in primary_key_indices {
@@ -452,7 +452,6 @@ impl<S: StorageEngine> MitoEngineInner<S> {
.write_buffer_size
.map(|s| s.0 as usize),
ttl: table_info.meta.options.ttl,
compaction_time_window: table_info.meta.options.compaction_time_window,
};
debug!(
@@ -532,7 +531,6 @@ impl<S: StorageEngine> MitoEngineInner<S> {
.write_buffer_size
.map(|s| s.0 as usize),
ttl: table_info.meta.options.ttl,
compaction_time_window: table_info.meta.options.compaction_time_window,
};
// TODO(weny): Returns an error earlier if the target region does not exist in the meta.

View File

@@ -228,18 +228,15 @@ impl<S: StorageEngine> TableCreator<S> {
let table_options = &self.data.request.table_options;
let write_buffer_size = table_options.write_buffer_size.map(|size| size.0 as usize);
let ttl = table_options.ttl;
let compaction_time_window = table_options.compaction_time_window;
let open_opts = OpenOptions {
parent_dir: table_dir.to_string(),
write_buffer_size,
ttl,
compaction_time_window,
};
let create_opts = CreateOptions {
parent_dir: table_dir.to_string(),
write_buffer_size,
ttl,
compaction_time_window,
};
let primary_key_indices = &self.data.request.primary_key_indices;
@@ -285,7 +282,6 @@ impl<S: StorageEngine> TableCreator<S> {
.name(region_name.clone())
.row_key(row_key.clone())
.default_cf(default_cf.clone())
.compaction_time_window(compaction_time_window)
.build()
.context(BuildRegionDescriptorSnafu {
table_name: &self.data.request.table_name,

View File

@@ -107,7 +107,7 @@ pub enum Error {
source,
))]
UpdateTableManifest {
#[snafu(backtrace)]
location: Location,
source: storage::error::Error,
table_name: String,
},
@@ -118,7 +118,7 @@ pub enum Error {
source,
))]
ScanTableManifest {
#[snafu(backtrace)]
location: Location,
source: storage::error::Error,
table_name: String,
},
@@ -149,7 +149,7 @@ pub enum Error {
source
))]
ConvertRaw {
#[snafu(backtrace)]
location: Location,
source: table::metadata::ConvertError,
},

View File

@@ -25,3 +25,7 @@ pub const MITO_CREATE_TABLE_UPDATE_MANIFEST_ELAPSED: &str =
pub const MITO_OPEN_TABLE_ELAPSED: &str = "datanode.mito.open_table";
/// Elapsed time of altering tables
pub const MITO_ALTER_TABLE_ELAPSED: &str = "datanode.mito.alter_table";
/// Elapsed time of insertion
pub const MITO_INSERT_ELAPSED: &str = "datanode.mito.insert";
/// Insert batch size.
pub const MITO_INSERT_BATCH_SIZE: &str = "datanode.mito.insert_batch_size";

View File

@@ -29,6 +29,7 @@ use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatch, RecordBatchStreamAdaptor, SendableRecordBatchStream};
use common_telemetry::{info, logging};
use datatypes::schema::Schema;
use metrics::histogram;
use object_store::ObjectStore;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::manifest::{self, Manifest, ManifestVersion, MetaActionIterator};
@@ -57,6 +58,8 @@ use crate::error::{
};
use crate::manifest::action::*;
use crate::manifest::TableManifest;
use crate::metrics::{MITO_INSERT_BATCH_SIZE, MITO_INSERT_ELAPSED};
#[inline]
fn table_manifest_dir(table_dir: &str) -> String {
assert!(table_dir.ends_with('/'));
@@ -83,6 +86,8 @@ impl<R: Region> Table for MitoTable<R> {
}
async fn insert(&self, request: InsertRequest) -> TableResult<usize> {
let _timer = common_telemetry::timer!(MITO_INSERT_ELAPSED);
if request.columns_values.is_empty() {
return Ok(0);
}
@@ -105,6 +110,8 @@ impl<R: Region> Table for MitoTable<R> {
// columns_values is not empty, it's safe to unwrap
let rows_num = columns_values.values().next().unwrap().len();
histogram!(MITO_INSERT_BATCH_SIZE, rows_num as f64);
logging::trace!(
"Insert into table {} region {} with data: {:?}",
self.table_info().name,

View File

@@ -14,9 +14,9 @@ metrics.workspace = true
opendal = { version = "0.36", features = ["layers-tracing", "layers-metrics"] }
pin-project = "1.0"
tokio.workspace = true
uuid.workspace = true
[dev-dependencies]
anyhow = "1.0"
common-telemetry = { path = "../common/telemetry" }
common-test-util = { path = "../common/test-util" }
uuid.workspace = true

View File

@@ -12,8 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::env;
use crate::{ObjectStore, Result};
/// Temp folder for object store test
pub struct TempFolder {
store: ObjectStore,
// The path under root.
@@ -28,7 +31,34 @@ impl TempFolder {
}
}
pub async fn remove_all(&mut self) -> Result<()> {
pub async fn remove_all(&self) -> Result<()> {
self.store.remove_all(&self.path).await
}
}
/// S3 test config read from environment variables.
#[derive(Debug)]
pub struct TestS3Config {
pub root: String,
pub access_key_id: String,
pub secret_access_key: String,
pub bucket: String,
pub region: Option<String>,
}
/// Returns the S3 test config, or `None` if the required environment variables are not set.
pub fn s3_test_config() -> Option<TestS3Config> {
if let Ok(b) = env::var("GT_S3_BUCKET") {
if !b.is_empty() {
return Some(TestS3Config {
root: uuid::Uuid::new_v4().to_string(),
access_key_id: env::var("GT_S3_ACCESS_KEY_ID").ok()?,
secret_access_key: env::var("GT_S3_ACCESS_KEY").ok()?,
bucket: env::var("GT_S3_BUCKET").ok()?,
region: Some(env::var("GT_S3_REGION").ok()?),
});
}
}
None
}

View File

@@ -120,7 +120,7 @@ async fn test_s3_backend() -> Result<()> {
let store = ObjectStore::new(builder).unwrap().finish();
let mut guard = TempFolder::new(&store, "/");
let guard = TempFolder::new(&store, "/");
test_object_crud(&store).await?;
test_object_list(&store).await?;
guard.remove_all().await?;
@@ -148,7 +148,7 @@ async fn test_oss_backend() -> Result<()> {
let store = ObjectStore::new(builder).unwrap().finish();
let mut guard = TempFolder::new(&store, "/");
let guard = TempFolder::new(&store, "/");
test_object_crud(&store).await?;
test_object_list(&store).await?;
guard.remove_all().await?;
@@ -176,7 +176,7 @@ async fn test_azblob_backend() -> Result<()> {
let store = ObjectStore::new(builder).unwrap().finish();
let mut guard = TempFolder::new(&store, "/");
let guard = TempFolder::new(&store, "/");
test_object_crud(&store).await?;
test_object_list(&store).await?;
guard.remove_all().await?;

View File

@@ -28,7 +28,7 @@ pub enum Error {
#[snafu(display("Failed to request Meta, source: {}", source))]
RequestMeta {
#[snafu(backtrace)]
location: Location,
source: meta_client::error::Error,
},
@@ -75,7 +75,7 @@ pub enum Error {
))]
CreateDefaultToRead {
column: String,
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},
@@ -128,7 +128,7 @@ pub enum Error {
))]
ConvertScalarValue {
value: ScalarValue,
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},

View File

@@ -34,7 +34,7 @@ pub enum InnerError {
#[snafu(display("Fail to convert arrow schema, source: {}", source))]
ConvertSchema {
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},
@@ -43,13 +43,13 @@ pub enum InnerError {
source
))]
ConvertDfRecordBatchStream {
#[snafu(backtrace)]
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to execute physical plan, source: {}", source))]
ExecutePhysicalPlan {
#[snafu(backtrace)]
location: Location,
source: common_query::error::Error,
},
}
@@ -62,8 +62,8 @@ impl ErrorExt for InnerError {
// TODO(yingwen): Further categorize datafusion error.
Datafusion { .. } => StatusCode::EngineExecuteQuery,
PhysicalPlanDowncast { .. } | ConvertSchema { .. } => StatusCode::Unexpected,
ConvertDfRecordBatchStream { source } => source.status_code(),
ExecutePhysicalPlan { source } => source.status_code(),
ConvertDfRecordBatchStream { source, .. } => source.status_code(),
ExecutePhysicalPlan { source, .. } => source.status_code(),
}
}

View File

@@ -27,6 +27,10 @@ use datafusion::catalog::catalog::MemoryCatalogList;
use datafusion::error::Result as DfResult;
use datafusion::execution::context::{QueryPlanner, SessionConfig, SessionState};
use datafusion::execution::runtime_env::RuntimeEnv;
use datafusion::physical_optimizer::dist_enforcement::EnforceDistribution;
use datafusion::physical_optimizer::repartition::Repartition;
use datafusion::physical_optimizer::sort_enforcement::EnforceSorting;
use datafusion::physical_optimizer::PhysicalOptimizerRule;
use datafusion::physical_plan::planner::{DefaultPhysicalPlanner, ExtensionPlanner};
use datafusion::physical_plan::{ExecutionPlan, PhysicalPlanner};
use datafusion_expr::LogicalPlan as DfLogicalPlan;
@@ -79,6 +83,22 @@ impl QueryEngineState {
let mut optimizer = Optimizer::new();
optimizer.rules.push(Arc::new(OrderHintRule));
let mut physical_optimizers = {
let state = SessionState::with_config_rt(session_config.clone(), runtime_env.clone());
state.physical_optimizers().to_vec()
};
// Run the repartition and sort enforcement rules first.
// `EnforceSorting` is required to run after `EnforceDistribution`.
Self::remove_physical_optimize_rule(&mut physical_optimizers, EnforceSorting {}.name());
Self::remove_physical_optimize_rule(
&mut physical_optimizers,
EnforceDistribution {}.name(),
);
Self::remove_physical_optimize_rule(&mut physical_optimizers, Repartition {}.name());
physical_optimizers.insert(0, Arc::new(EnforceSorting {}));
physical_optimizers.insert(0, Arc::new(EnforceDistribution {}));
physical_optimizers.insert(0, Arc::new(Repartition {}));
let session_state = SessionState::with_config_rt_and_catalog_list(
session_config,
runtime_env,
@@ -90,7 +110,8 @@ impl QueryEngineState {
partition_manager,
datanode_clients,
)))
.with_optimizer_rules(optimizer.rules);
.with_optimizer_rules(optimizer.rules)
.with_physical_optimizer_rules(physical_optimizers);
let df_context = SessionContext::with_state(session_state);
@@ -102,6 +123,22 @@ impl QueryEngineState {
}
}
fn remove_physical_optimize_rule(
rules: &mut Vec<Arc<dyn PhysicalOptimizerRule + Send + Sync>>,
name: &str,
) {
let mut index_to_move = None;
for (i, rule) in rules.iter().enumerate() {
if rule.name() == name {
index_to_move = Some(i);
break;
}
}
if let Some(index) = index_to_move {
rules.remove(index);
}
}
/// Register a udf function
// TODO(dennis): manage UDFs by ourself.
pub fn register_udf(&self, udf: ScalarUdf) {
@@ -116,13 +153,25 @@ impl QueryEngineState {
.cloned()
}
/// Register an aggregate function.
///
/// # Panics
/// Will panic if a function with the same name is already registered.
///
/// Panicking consideration: currently the aggregate functions are all statically registered;
/// users cannot define their own aggregate functions on the fly, so we can panic here. If that
/// invariant is broken in the future, we should return an error instead of panicking.
pub fn register_aggregate_function(&self, func: AggregateFunctionMetaRef) {
// TODO(LFC): Return some error if there exists an aggregate function with the same name.
// Simply overwrite the old value for now.
self.aggregate_functions
let name = func.name();
let x = self
.aggregate_functions
.write()
.unwrap()
.insert(func.name(), func);
.insert(name.clone(), func);
assert!(
x.is_none(),
"Already registered aggregate function '{name}'"
);
}
#[inline]

View File

@@ -96,7 +96,6 @@ pub async fn show_databases(
stmt: ShowDatabases,
catalog_manager: CatalogManagerRef,
) -> Result<Output> {
// TODO(LFC): supports WHERE
ensure!(
matches!(stmt.kind, ShowKind::All | ShowKind::Like(_)),
error::UnsupportedExprSnafu {
@@ -136,7 +135,6 @@ pub async fn show_tables(
catalog_manager: CatalogManagerRef,
query_ctx: QueryContextRef,
) -> Result<Output> {
// TODO(LFC): supports WHERE
ensure!(
matches!(stmt.kind, ShowKind::All | ShowKind::Like(_)),
error::UnsupportedExprSnafu {

View File

@@ -71,10 +71,6 @@ fn create_sql_options(table_meta: &TableMeta) -> Vec<SqlOption> {
));
}
if let Some(w) = table_opts.compaction_time_window {
options.push(sql_option("compaction_time_window", number_value(w)));
}
for (k, v) in table_opts
.extra_options
.iter()

View File

@@ -23,7 +23,7 @@ use snafu::Location;
pub enum Error {
#[snafu(display("Failed to find scripts table, source: {}", source))]
FindScriptsTable {
#[snafu(backtrace)]
location: Location,
source: catalog::error::Error,
},
@@ -32,7 +32,7 @@ pub enum Error {
#[snafu(display("Failed to register scripts table, source: {}", source))]
RegisterScriptsTable {
#[snafu(backtrace)]
location: Location,
source: catalog::error::Error,
},
@@ -46,21 +46,21 @@ pub enum Error {
))]
InsertScript {
name: String,
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to compile python script, name: {}, source: {}", name, source))]
CompilePython {
name: String,
#[snafu(backtrace)]
location: Location,
source: crate::python::error::Error,
},
#[snafu(display("Failed to execute python script {}, source: {}", name, source))]
ExecutePython {
name: String,
#[snafu(backtrace)]
location: Location,
source: crate::python::error::Error,
},
@@ -70,13 +70,13 @@ pub enum Error {
#[snafu(display("Failed to find script by name: {}", name))]
FindScript {
name: String,
#[snafu(backtrace)]
location: Location,
source: query::error::Error,
},
#[snafu(display("Failed to collect record batch, source: {}", source))]
CollectRecords {
#[snafu(backtrace)]
location: Location,
source: common_recordbatch::error::Error,
},
@@ -92,11 +92,13 @@ impl ErrorExt for Error {
match self {
FindColumnInScriptsTable { .. } | CastType { .. } => StatusCode::Unexpected,
ScriptsTableNotFound { .. } => StatusCode::TableNotFound,
RegisterScriptsTable { source } | FindScriptsTable { source } => source.status_code(),
RegisterScriptsTable { source, .. } | FindScriptsTable { source, .. } => {
source.status_code()
}
InsertScript { source, .. } => source.status_code(),
CompilePython { source, .. } | ExecutePython { source, .. } => source.status_code(),
FindScript { source, .. } => source.status_code(),
CollectRecords { source } => source.status_code(),
CollectRecords { source, .. } => source.status_code(),
ScriptNotFound { .. } => StatusCode::InvalidArguments,
}
}

View File

@@ -40,7 +40,7 @@ use snafu::{ensure, ResultExt};
use sql::statements::statement::Statement;
use crate::engine::{CompileContext, EvalContext, Script, ScriptEngine};
use crate::python::error::{self, PyRuntimeSnafu, Result, TokioJoinSnafu};
use crate::python::error::{self, DatabaseQuerySnafu, PyRuntimeSnafu, Result, TokioJoinSnafu};
use crate::python::ffi_types::copr::{exec_parsed, parse, AnnotationInfo, CoprocessorRef};
use crate::python::utils::spawn_blocking_script;
const PY_ENGINE: &str = "python";
@@ -290,8 +290,13 @@ impl Script for PyScript {
.query_engine
.planner()
.plan(stmt, QueryContext::arc())
.await?;
let res = self.query_engine.execute(plan, QueryContext::arc()).await?;
.await
.context(DatabaseQuerySnafu)?;
let res = self
.query_engine
.execute(plan, QueryContext::arc())
.await
.context(DatabaseQuerySnafu)?;
let copr = self.copr.clone();
match res {
Output::Stream(stream) => Ok(Output::Stream(Box::pin(CoprStream::try_new(
@@ -346,6 +351,7 @@ impl ScriptEngine for PyEngine {
})
}
}
#[cfg(test)]
pub(crate) use tests::sample_script_engine;

View File

@@ -35,13 +35,13 @@ pub(crate) fn ret_other_error_with(reason: String) -> OtherSnafu<String> {
pub enum Error {
#[snafu(display("Datatype error: {}", source))]
TypeCast {
#[snafu(backtrace)]
location: SnafuLocation,
source: DataTypeError,
},
#[snafu(display("Failed to query, source: {}", source))]
DatabaseQuery {
#[snafu(backtrace)]
location: SnafuLocation,
source: QueryError,
},
@@ -105,25 +105,19 @@ pub enum Error {
#[snafu(display("Failed to retrieve record batches, source: {}", source))]
RecordBatch {
#[snafu(backtrace)]
location: SnafuLocation,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to create record batch, source: {}", source))]
NewRecordBatch {
#[snafu(backtrace)]
location: SnafuLocation,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to create tokio task, source: {}", source))]
TokioJoin { source: tokio::task::JoinError },
}
impl From<QueryError> for Error {
fn from(source: QueryError) -> Self {
Self::DatabaseQuery { source }
}
}
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
@@ -133,11 +127,11 @@ impl ErrorExt for Error {
| Error::TokioJoin { .. }
| Error::Other { .. } => StatusCode::Internal,
Error::RecordBatch { source } | Error::NewRecordBatch { source } => {
Error::RecordBatch { source, .. } | Error::NewRecordBatch { source, .. } => {
source.status_code()
}
Error::DatabaseQuery { source } => source.status_code(),
Error::TypeCast { source } => source.status_code(),
Error::DatabaseQuery { source, .. } => source.status_code(),
Error::TypeCast { source, .. } => source.status_code(),
Error::PyParse { .. }
| Error::PyCompile { .. }
@@ -150,12 +144,6 @@ impl ErrorExt for Error {
self
}
}
// impl from for those error so one can use question mark and implicitly cast into `CoprError`
impl From<DataTypeError> for Error {
fn from(e: DataTypeError) -> Self {
Self::TypeCast { source: e }
}
}
/// Pretty print [`Error`] in a given script,
/// basically printing an arrow that points to where the error occurs (if a location is available)

View File

@@ -5,6 +5,7 @@ edition.workspace = true
license.workspace = true
[features]
pprof = ["dep:common-pprof"]
mem-prof = ["dep:common-mem-prof"]
dashboard = []
@@ -25,6 +26,7 @@ common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
common-grpc-expr = { path = "../common/grpc-expr" }
common-mem-prof = { path = "../common/mem-prof", optional = true }
common-pprof = { path = "../common/pprof", optional = true }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" }
@@ -71,6 +73,7 @@ snap = "1"
sql = { path = "../sql" }
strum = { version = "0.24", features = ["derive"] }
table = { path = "../table" }
tikv-jemalloc-ctl = { version = "0.5", features = ["use_std"] }
tokio-rustls = "0.24"
tokio-stream = { version = "0.1", features = ["net"] }
tokio.workspace = true

View File

@@ -111,7 +111,7 @@ pub enum Error {
#[snafu(display("Auth failed, source: {}", source))]
AuthBackend {
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},

View File

@@ -49,7 +49,7 @@ pub enum Error {
#[snafu(display("Failed to collect recordbatch, source: {}", source))]
CollectRecordbatch {
#[snafu(backtrace)]
location: Location,
source: common_recordbatch::error::Error,
},
@@ -71,19 +71,19 @@ pub enum Error {
#[snafu(display("Failed to execute query: {}, source: {}", query, source))]
ExecuteQuery {
query: String,
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
#[snafu(display("{source}"))]
ExecuteGrpcQuery {
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to check database validity, source: {}", source))]
CheckDatabaseValidity {
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
@@ -93,14 +93,14 @@ pub enum Error {
#[snafu(display("Failed to insert script with name: {}, source: {}", name, source))]
InsertScript {
name: String,
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to execute script by name: {}, source: {}", name, source))]
ExecuteScript {
name: String,
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
@@ -112,13 +112,19 @@ pub enum Error {
#[snafu(display("Failed to parse InfluxDB line protocol, source: {}", source))]
InfluxdbLineProtocol {
#[snafu(backtrace)]
location: Location,
source: influxdb_line_protocol::Error,
},
#[snafu(display("Failed to write InfluxDB line protocol, source: {}", source))]
InfluxdbLinesWrite {
#[snafu(backtrace)]
location: Location,
source: common_grpc::error::Error,
},
#[snafu(display("Failed to write prometheus series, source: {}", source))]
PromSeriesWrite {
location: Location,
source: common_grpc::error::Error,
},
@@ -172,7 +178,7 @@ pub enum Error {
#[snafu(display("Failed to get user info, source: {}", source))]
Auth {
#[snafu(backtrace)]
location: Location,
source: auth::Error,
},
@@ -215,7 +221,7 @@ pub enum Error {
#[cfg(feature = "mem-prof")]
#[snafu(display("Failed to dump profile data, source: {}", source))]
DumpProfileData {
#[snafu(backtrace)]
location: Location,
source: common_mem_prof::error::Error,
},
@@ -240,7 +246,7 @@ pub enum Error {
#[snafu(display("Failed to parse PromQL: {query:?}, source: {source}"))]
ParsePromQL {
query: PromQuery,
#[snafu(backtrace)]
location: Location,
source: query::error::Error,
},
@@ -260,6 +266,19 @@ pub enum Error {
source: tokio::task::JoinError,
location: Location,
},
#[cfg(feature = "pprof")]
#[snafu(display("Failed to dump pprof data, source: {}", source))]
DumpPprof {
#[snafu(backtrace)]
source: common_pprof::Error,
},
#[snafu(display("Failed to update jemalloc metrics, source: {source}, location: {location}"))]
UpdateJemallocMetrics {
source: tikv_jemalloc_ctl::Error,
location: Location,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -300,7 +319,9 @@ impl ErrorExt for Error {
| InvalidPrepareStatement { .. }
| TimePrecision { .. } => StatusCode::InvalidArguments,
InfluxdbLinesWrite { source, .. } => source.status_code(),
InfluxdbLinesWrite { source, .. } | PromSeriesWrite { source, .. } => {
source.status_code()
}
Hyper { .. } => StatusCode::Unknown,
TlsRequired { .. } => StatusCode::Unknown,
@@ -333,6 +354,11 @@ impl ErrorExt for Error {
StatusCode::Unknown
}
}
#[cfg(feature = "pprof")]
DumpPprof { source, .. } => source.status_code(),
UpdateJemallocMetrics { .. } => StatusCode::Internal,
}
}
@@ -394,17 +420,12 @@ impl From<std::io::Error> for Error {
}
}
impl From<auth::Error> for Error {
fn from(e: auth::Error) -> Self {
Error::Auth { source: e }
}
}
impl IntoResponse for Error {
fn into_response(self) -> Response {
let (status, error_message) = match self {
Error::InfluxdbLineProtocol { .. }
| Error::InfluxdbLinesWrite { .. }
| Error::PromSeriesWrite { .. }
| Error::InvalidOpentsdbLine { .. }
| Error::InvalidOpentsdbJsonRequest { .. }
| Error::DecodePromRemoteRequest { .. }

View File

@@ -29,8 +29,8 @@ use snafu::{OptionExt, ResultExt};
use tonic::Status;
use crate::auth::{Identity, Password, UserProviderRef};
use crate::error::Error::{Auth, UnsupportedAuthScheme};
use crate::error::{InvalidQuerySnafu, JoinTaskSnafu, NotFoundAuthHeaderSnafu};
use crate::error::Error::UnsupportedAuthScheme;
use crate::error::{AuthSnafu, InvalidQuerySnafu, JoinTaskSnafu, NotFoundAuthHeaderSnafu};
use crate::grpc::TonicResult;
use crate::metrics::{
METRIC_AUTH_FAILURE, METRIC_CODE_LABEL, METRIC_SERVER_GRPC_DB_REQUEST_TIMER,
@@ -123,7 +123,7 @@ impl GreptimeRequestHandler {
&query_ctx.current_schema(),
)
.await
.map_err(|e| Auth { source: e }),
.context(AuthSnafu),
AuthScheme::Token(_) => Err(UnsupportedAuthScheme {
name: "Token AuthScheme".to_string(),
}),

View File

@@ -15,14 +15,18 @@
//! PrometheusGateway provides a gRPC interface to query Prometheus metrics
//! by PromQL. The behavior is similar to the Prometheus HTTP API.
use std::sync::Arc;
use api::v1::prometheus_gateway_server::PrometheusGateway;
use api::v1::promql_request::Promql;
use api::v1::{PromqlRequest, PromqlResponse, ResponseHeader};
use async_trait::async_trait;
use common_error::prelude::ErrorExt;
use common_telemetry::timer;
use common_time::util::current_time_rfc3339;
use promql_parser::parser::ValueType;
use query::parser::PromQuery;
use session::context::QueryContext;
use snafu::OptionExt;
use tonic::{Request, Response};
@@ -68,23 +72,9 @@ impl PrometheusGateway for PrometheusGatewayService {
};
let query_context = create_query_context(inner.header.as_ref());
let _timer = timer!(
crate::metrics::METRIC_SERVER_GRPC_PROM_REQUEST_TIMER,
&[(
crate::metrics::METRIC_DB_LABEL,
query_context.get_db_string()
)]
);
let result = self.handler.do_query(&prom_query, query_context).await;
let (metric_name, mut result_type) =
retrieve_metric_name_and_result_type(&prom_query.query).unwrap_or_default();
// range query only returns matrix
if is_range_query {
result_type = Some(ValueType::Matrix)
};
let json_response = PromJsonResponse::from_query_result(result, metric_name, result_type)
.await
.0;
let json_response = self
.handle_inner(prom_query, query_context, is_range_query)
.await;
let json_bytes = serde_json::to_string(&json_response).unwrap().into_bytes();
let response = Response::new(PromqlResponse {
@@ -99,4 +89,34 @@ impl PrometheusGatewayService {
pub fn new(handler: PromHandlerRef) -> Self {
Self { handler }
}
async fn handle_inner(
&self,
query: PromQuery,
ctx: Arc<QueryContext>,
is_range_query: bool,
) -> PromJsonResponse {
let _timer = timer!(
crate::metrics::METRIC_SERVER_GRPC_PROM_REQUEST_TIMER,
&[(crate::metrics::METRIC_DB_LABEL, ctx.get_db_string())]
);
let result = self.handler.do_query(&query, ctx).await;
let (metric_name, mut result_type) =
match retrieve_metric_name_and_result_type(&query.query) {
Ok((metric_name, result_type)) => (metric_name.unwrap_or_default(), result_type),
Err(err) => {
return PromJsonResponse::error(err.status_code().to_string(), err.to_string())
.0
}
};
// range query only returns matrix
if is_range_query {
result_type = ValueType::Matrix;
};
PromJsonResponse::from_query_result(result, metric_name, result_type)
.await
.0
}
}

View File

@@ -12,18 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
mod admin;
pub mod authorize;
pub mod handler;
pub mod influxdb;
pub mod mem_prof;
pub mod opentsdb;
mod pprof;
pub mod prometheus;
pub mod script;
mod admin;
#[cfg(feature = "dashboard")]
mod dashboard;
#[cfg(feature = "mem-prof")]
pub mod mem_prof;
use std::net::SocketAddr;
use std::sync::Arc;
@@ -503,15 +503,6 @@ impl HttpServer {
);
}
// mem profiler
#[cfg(feature = "mem-prof")]
{
router = router.nest(
&format!("/{HTTP_API_VERSION}/prof"),
Router::new().route("/mem", routing::get(crate::http::mem_prof::mem_prof)),
);
}
if let Some(metrics_handler) = self.metrics_handler {
router = router.nest("", self.route_metrics(metrics_handler));
}
@@ -556,6 +547,19 @@ impl HttpServer {
HttpAuth::<BoxBody>::new(self.user_provider.clone()),
)),
)
// Handlers for debugging; we don't expect a timeout.
.nest(
&format!("/{HTTP_API_VERSION}/prof"),
Router::new()
.route(
"/cpu",
routing::get(pprof::pprof_handler).post(pprof::pprof_handler),
)
.route(
"/mem",
routing::get(mem_prof::mem_prof_handler).post(mem_prof::mem_prof_handler),
),
)
}
fn route_metrics<S>(&self, metrics_handler: MetricsHandler) -> Router<S> {

View File

@@ -23,16 +23,15 @@ use http_body::Body;
use metrics::increment_counter;
use secrecy::SecretString;
use session::context::UserInfo;
use snafu::{ensure, OptionExt, ResultExt};
use snafu::{ensure, IntoError, OptionExt, ResultExt};
use tower_http::auth::AsyncAuthorizeRequest;
use super::PUBLIC_APIS;
use crate::auth::Error::IllegalParam;
use crate::auth::{Identity, IllegalParamSnafu, UserProviderRef};
use crate::error::Error::Auth;
use crate::error::{
self, InvalidAuthorizationHeaderSnafu, InvisibleASCIISnafu, NotFoundInfluxAuthSnafu, Result,
UnsupportedAuthSchemeSnafu,
self, AuthSnafu, InvalidAuthorizationHeaderSnafu, InvisibleASCIISnafu, NotFoundInfluxAuthSnafu,
Result, UnsupportedAuthSchemeSnafu,
};
use crate::http::HTTP_API_PREFIX;
@@ -183,12 +182,9 @@ fn get_influxdb_credentials<B: Send + Sync + 'static>(
(Some(username), Some(password)) => {
Ok(Some((username.to_string(), password.to_string().into())))
}
_ => Err(Auth {
source: IllegalParam {
msg: "influxdb auth: username and password must be provided together"
.to_string(),
},
}),
_ => Err(AuthSnafu.into_error(IllegalParam {
msg: "influxdb auth: username and password must be provided together".to_string(),
})),
}
}
}

View File

@@ -19,14 +19,14 @@ use aide::transform::TransformOperation;
use axum::extract::{Json, Query, State};
use axum::{Extension, Form};
use common_error::status_code::StatusCode;
use common_telemetry::timer;
use common_telemetry::{error, timer};
use query::parser::PromQuery;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use session::context::UserInfo;
use crate::http::{ApiState, JsonResponse};
use crate::metrics::PROCESS_COLLECTOR;
use crate::metrics::{JEMALLOC_COLLECTOR, PROCESS_COLLECTOR};
use crate::metrics_handler::MetricsHandler;
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
@@ -137,7 +137,11 @@ pub async fn metrics(
) -> String {
// Collect process metrics.
PROCESS_COLLECTOR.collect();
if let Some(c) = JEMALLOC_COLLECTOR.as_ref() {
if let Err(e) = c.update() {
error!(e; "Failed to update jemalloc metrics");
}
}
state.render()
}

View File

@@ -14,13 +14,14 @@
use axum::http::StatusCode;
use axum::response::IntoResponse;
use snafu::ResultExt;
use crate::error::DumpProfileDataSnafu;
#[cfg(feature = "mem-prof")]
#[axum_macros::debug_handler]
pub async fn mem_prof() -> crate::error::Result<impl IntoResponse> {
pub async fn mem_prof_handler() -> crate::error::Result<impl IntoResponse> {
use snafu::ResultExt;
use crate::error::DumpProfileDataSnafu;
Ok((
StatusCode::OK,
common_mem_prof::dump_profile()
@@ -28,3 +29,12 @@ pub async fn mem_prof() -> crate::error::Result<impl IntoResponse> {
.context(DumpProfileDataSnafu)?,
))
}
#[cfg(not(feature = "mem-prof"))]
#[axum_macros::debug_handler]
pub async fn mem_prof_handler() -> crate::error::Result<impl IntoResponse> {
Ok((
StatusCode::NOT_IMPLEMENTED,
"The 'mem-prof' feature is disabled",
))
}

View File

@@ -0,0 +1,98 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(feature = "pprof")]
pub mod handler {
use std::num::NonZeroI32;
use std::time::Duration;
use axum::extract::Query;
use axum::http::StatusCode;
use axum::response::IntoResponse;
use common_pprof::Profiling;
use common_telemetry::logging;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use crate::error::{DumpPprofSnafu, Result};
/// Output format.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub enum Output {
/// Google's pprof format report in protobuf.
Proto,
/// Simple text format.
Text,
/// SVG flamegraph.
Flamegraph,
}
#[derive(Serialize, Deserialize, Debug, JsonSchema)]
#[serde(default)]
pub struct PprofQuery {
seconds: u64,
frequency: NonZeroI32,
output: Output,
}
impl Default for PprofQuery {
fn default() -> PprofQuery {
PprofQuery {
seconds: 5,
// Safety: 99 is non zero.
frequency: NonZeroI32::new(99).unwrap(),
output: Output::Proto,
}
}
}
#[axum_macros::debug_handler]
pub async fn pprof_handler(Query(req): Query<PprofQuery>) -> Result<impl IntoResponse> {
logging::info!("start pprof, request: {:?}", req);
let profiling = Profiling::new(Duration::from_secs(req.seconds), req.frequency.into());
let body = match req.output {
Output::Proto => profiling.dump_proto().await.context(DumpPprofSnafu)?,
Output::Text => {
let report = profiling.report().await.context(DumpPprofSnafu)?;
format!("{:?}", report).into_bytes()
}
Output::Flamegraph => profiling.dump_flamegraph().await.context(DumpPprofSnafu)?,
};
logging::info!("finish pprof");
Ok((StatusCode::OK, body))
}
}
#[cfg(not(feature = "pprof"))]
pub mod handler {
use axum::http::StatusCode;
use axum::response::IntoResponse;
use crate::error::Result;
#[axum_macros::debug_handler]
pub async fn pprof_handler() -> Result<impl IntoResponse> {
Ok((
StatusCode::NOT_IMPLEMENTED,
"The 'pprof' feature is disabled",
))
}
}
pub use handler::pprof_handler;

View File

@@ -15,12 +15,20 @@
use std::task::{Context, Poll};
use std::time::Instant;
use common_telemetry::error;
use hyper::Body;
use metrics::gauge;
use metrics_process::Collector;
use once_cell::sync::Lazy;
use snafu::ResultExt;
use tikv_jemalloc_ctl::stats::{allocated_mib, resident_mib};
use tikv_jemalloc_ctl::{epoch, epoch_mib, stats};
use tonic::body::BoxBody;
use tower::{Layer, Service};
use crate::error;
use crate::error::UpdateJemallocMetricsSnafu;
pub(crate) const METRIC_DB_LABEL: &str = "db";
pub(crate) const METRIC_CODE_LABEL: &str = "code";
pub(crate) const METRIC_TYPE_LABEL: &str = "type";
@@ -59,6 +67,8 @@ pub(crate) const METRIC_GRPC_REQUESTS_ELAPSED: &str = "servers.grpc_requests_ela
pub(crate) const METRIC_METHOD_LABEL: &str = "method";
pub(crate) const METRIC_PATH_LABEL: &str = "path";
pub(crate) const METRIC_STATUS_LABEL: &str = "status";
pub(crate) const METRIC_JEMALLOC_RESIDENT: &str = "sys.jemalloc.resident";
pub(crate) const METRIC_JEMALLOC_ALLOCATED: &str = "sys.jemalloc.allocated";
/// Prometheus style process metrics collector.
pub(crate) static PROCESS_COLLECTOR: Lazy<Collector> = Lazy::new(|| {
@@ -68,6 +78,49 @@ pub(crate) static PROCESS_COLLECTOR: Lazy<Collector> = Lazy::new(|| {
collector
});
pub(crate) static JEMALLOC_COLLECTOR: Lazy<Option<JemallocCollector>> = Lazy::new(|| {
let collector = JemallocCollector::try_new()
.map_err(|e| {
error!(e; "Failed to retrieve jemalloc metrics");
e
})
.ok();
collector.map(|c| {
if let Err(e) = c.update() {
error!(e; "Failed to update jemalloc metrics");
};
c
})
});
pub(crate) struct JemallocCollector {
epoch: epoch_mib,
allocated: allocated_mib,
resident: resident_mib,
}
impl JemallocCollector {
pub(crate) fn try_new() -> error::Result<Self> {
let e = epoch::mib().context(UpdateJemallocMetricsSnafu)?;
let allocated = stats::allocated::mib().context(UpdateJemallocMetricsSnafu)?;
let resident = stats::resident::mib().context(UpdateJemallocMetricsSnafu)?;
Ok(Self {
epoch: e,
allocated,
resident,
})
}
pub(crate) fn update(&self) -> error::Result<()> {
self.epoch.advance().context(UpdateJemallocMetricsSnafu)?;
let allocated = self.allocated.read().context(UpdateJemallocMetricsSnafu)?;
let resident = self.resident.read().context(UpdateJemallocMetricsSnafu)?;
gauge!(METRIC_JEMALLOC_ALLOCATED, allocated as f64);
gauge!(METRIC_JEMALLOC_RESIDENT, resident as f64);
Ok(())
}
}
// Based on https://github.com/hyperium/tonic/blob/master/examples/src/tower/server.rs
// See https://github.com/hyperium/tonic/issues/242
/// A metrics middleware.

View File

@@ -16,6 +16,7 @@
//! Inspired by Databend's "[mysql_federated.rs](https://github.com/datafuselabs/databend/blob/ac706bf65845e6895141c96c0a10bad6fdc2d367/src/query/service/src/servers/mysql/mysql_federated.rs)".
use std::collections::HashMap;
use std::env;
use std::sync::Arc;
use common_query::Output;
@@ -30,9 +31,6 @@ use regex::bytes::RegexSet;
use regex::Regex;
use session::context::QueryContextRef;
// TODO(LFC): Include GreptimeDB's version and git commit tag etc.
const MYSQL_VERSION: &str = "8.0.26";
static SELECT_VAR_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new("(?i)^(SELECT @@(.*))").unwrap());
static MYSQL_CONN_JAVA_PATTERN: Lazy<Regex> =
Lazy::new(|| Regex::new("(?i)^(/\\* mysql-connector-j(.*))").unwrap());
@@ -285,7 +283,7 @@ fn check_others(query: &str, query_ctx: QueryContextRef) -> Option<Output> {
}
let recordbatches = if SELECT_VERSION_PATTERN.is_match(query) {
Some(select_function("version()", MYSQL_VERSION))
Some(select_function("version()", &get_version()))
} else if SELECT_DATABASE_PATTERN.is_match(query) {
let schema = query_ctx.current_schema();
Some(select_function("database()", &schema))
@@ -318,8 +316,16 @@ pub(crate) fn check(query: &str, query_ctx: QueryContextRef) -> Option<Output> {
.or_else(|| check_others(query, query_ctx))
}
// get GreptimeDB's version.
fn get_version() -> String {
format!(
"{}-greptime",
env::var("CARGO_PKG_VERSION").unwrap_or_else(|_| "unknown".to_string()),
)
}
#[cfg(test)]
mod test {
use session::context::QueryContext;
use super::*;
@@ -345,13 +351,15 @@ mod test {
}
let query = "select version()";
let expected = "\
+-----------+
| version() |
+-----------+
| 8.0.26 |
+-----------+";
test(query, expected);
let expected = format!(
r#"+----------------+
| version() |
+----------------+
| {}-greptime |
+----------------+"#,
env::var("CARGO_PKG_VERSION").unwrap_or_else(|_| "unknown".to_string())
);
test(query, &expected);
let query = "SELECT @@version_comment LIMIT 1";
let expected = "\

View File

@@ -88,9 +88,6 @@ impl MysqlInstanceShim {
trace!("Start executing query: '{}'", query);
let start = Instant::now();
// TODO(LFC): Find a better way to deal with these special federated queries:
// `check` uses regex to filter out unsupported statements emitted by MySQL's federated
// components, this is quick and dirty, there must be a better way to do it.
let output =
if let Some(output) = crate::mysql::federated::check(query, self.session.context()) {
vec![Ok(output)]

View File

@@ -157,7 +157,6 @@ impl MysqlServer {
info!("MySQL connection coming from: {}", stream.peer_addr()?);
io_runtime.spawn(async move {
increment_gauge!(crate::metrics::METRIC_MYSQL_CONNECTIONS, 1.0);
// TODO(LFC): Use `output_stream` to write large MySQL ResultSet to client.
if let Err(e) = Self::do_handle(stream, spawn_ref, spawn_config).await {
// TODO(LFC): Write this error to client as well, in MySQL text protocol.
// Looks like we have to expose opensrv-mysql's `PacketWriter`?

View File

@@ -26,10 +26,11 @@ use pgwire::messages::startup::Authentication;
use pgwire::messages::{PgWireBackendMessage, PgWireFrontendMessage};
use session::context::UserInfo;
use session::Session;
use snafu::IntoError;
use super::PostgresServerHandler;
use crate::auth::{Identity, Password, UserProviderRef};
use crate::error::Result;
use crate::error::{AuthSnafu, Result};
use crate::query_handler::sql::ServerSqlQueryHandlerRef;
pub(crate) struct PgLoginVerifier {
@@ -106,7 +107,7 @@ impl PgLoginVerifier {
format!("{}", e.status_code())
)]
);
Err(e.into())
Err(AuthSnafu.into_error(e))
} else {
Ok(true)
}

View File

@@ -42,7 +42,7 @@ use schemars::JsonSchema;
use serde::de::{self, MapAccess, Visitor};
use serde::{Deserialize, Serialize};
use session::context::{QueryContext, QueryContextRef};
use snafu::{ensure, OptionExt, ResultExt};
use snafu::{ensure, Location, OptionExt, ResultExt};
use tokio::sync::oneshot::Sender;
use tokio::sync::{oneshot, Mutex};
use tower::ServiceBuilder;
@@ -52,7 +52,7 @@ use tower_http::trace::TraceLayer;
use crate::auth::UserProviderRef;
use crate::error::{
AlreadyStartedSnafu, CollectRecordbatchSnafu, Error, InternalSnafu, NotSupportedSnafu, Result,
AlreadyStartedSnafu, CollectRecordbatchSnafu, Error, InternalSnafu, InvalidQuerySnafu, Result,
StartHttpSnafu, UnexpectedResultSnafu,
};
use crate::http::authorize::HttpAuth;
@@ -97,6 +97,7 @@ impl PromServer {
.route("/query", routing::post(instant_query).get(instant_query))
.route("/query_range", routing::post(range_query).get(range_query))
.route("/labels", routing::post(labels_query).get(labels_query))
.route("/series", routing::post(series_query).get(series_query))
.route(
"/label/:label_name/values",
routing::get(label_values_query),
@@ -191,6 +192,7 @@ pub struct PromData {
pub enum PromResponse {
PromData(PromData),
Labels(Vec<String>),
Series(Vec<HashMap<String, String>>),
LabelValues(Vec<String>),
}
@@ -242,7 +244,7 @@ impl PromJsonResponse {
pub async fn from_query_result(
result: Result<Output>,
metric_name: String,
result_type: Option<ValueType>,
result_type: ValueType,
) -> Json<Self> {
let response: Result<Json<Self>> = try {
let json = match result? {
@@ -269,7 +271,7 @@ impl PromJsonResponse {
json
};
let result_type_string = result_type.map(|t| t.to_string()).unwrap_or_default();
let result_type_string = result_type.to_string();
match response {
Ok(resp) => resp,
@@ -293,7 +295,7 @@ impl PromJsonResponse {
fn record_batches_to_data(
batches: RecordBatches,
metric_name: String,
result_type: Option<ValueType>,
result_type: ValueType,
) -> Result<PromResponse> {
// infer semantic type of each column from schema.
// TODO(ruihang): wish there is a better way to do this.
@@ -388,27 +390,21 @@ impl PromJsonResponse {
.map(|(tags, mut values)| {
let metric = tags.into_iter().collect();
match result_type {
Some(ValueType::Vector) | Some(ValueType::Scalar) | Some(ValueType::String) => {
Ok(PromSeries {
metric,
value: values.pop(),
..Default::default()
})
}
Some(ValueType::Matrix) => Ok(PromSeries {
ValueType::Vector | ValueType::Scalar | ValueType::String => Ok(PromSeries {
metric,
value: values.pop(),
..Default::default()
}),
ValueType::Matrix => Ok(PromSeries {
metric,
values,
..Default::default()
}),
other => NotSupportedSnafu {
feat: format!("PromQL result type {other:?}"),
}
.fail(),
}
})
.collect::<Result<Vec<_>>>()?;
let result_type_string = result_type.map(|t| t.to_string()).unwrap_or_default();
let result_type_string = result_type.to_string();
let data = PromResponse::PromData(PromData {
result_type: result_type_string,
result,
@@ -450,8 +446,10 @@ pub async fn instant_query(
let query_ctx = QueryContext::with(catalog, schema);
let result = handler.do_query(&prom_query, Arc::new(query_ctx)).await;
let (metric_name, result_type) =
retrieve_metric_name_and_result_type(&prom_query.query).unwrap_or_default();
let (metric_name, result_type) = match retrieve_metric_name_and_result_type(&prom_query.query) {
Ok((metric_name, result_type)) => (metric_name.unwrap_or_default(), result_type),
Err(err) => return PromJsonResponse::error(err.status_code().to_string(), err.to_string()),
};
PromJsonResponse::from_query_result(result, metric_name, result_type).await
}
@@ -484,9 +482,11 @@ pub async fn range_query(
let query_ctx = QueryContext::with(catalog, schema);
let result = handler.do_query(&prom_query, Arc::new(query_ctx)).await;
let (metric_name, _) =
retrieve_metric_name_and_result_type(&prom_query.query).unwrap_or_default();
PromJsonResponse::from_query_result(result, metric_name, Some(ValueType::Matrix)).await
let metric_name = match retrieve_metric_name_and_result_type(&prom_query.query) {
Err(err) => return PromJsonResponse::error(err.status_code().to_string(), err.to_string()),
Ok((metric_name, _)) => metric_name.unwrap_or_default(),
};
PromJsonResponse::from_query_result(result, metric_name, ValueType::Matrix).await
}
#[derive(Debug, Default, Serialize, JsonSchema)]
@@ -593,6 +593,30 @@ pub async fn labels_query(
PromJsonResponse::success(PromResponse::Labels(sorted_labels))
}
async fn retrieve_series_from_query_result(
result: Result<Output>,
series: &mut Vec<HashMap<String, String>>,
table_name: &str,
) -> Result<()> {
match result? {
Output::RecordBatches(batches) => {
record_batches_to_series(batches, series, table_name)?;
Ok(())
}
Output::Stream(stream) => {
let batches = RecordBatches::try_collect(stream)
.await
.context(CollectRecordbatchSnafu)?;
record_batches_to_series(batches, series, table_name)?;
Ok(())
}
Output::AffectedRows(_) => Err(Error::UnexpectedResult {
reason: "expected data result, but got affected rows".to_string(),
location: Location::default(),
}),
}
}
/// Retrieve labels name from query result
async fn retrieve_labels_name_from_query_result(
result: Result<Output>,
@@ -617,6 +641,28 @@ async fn retrieve_labels_name_from_query_result(
}
}
fn record_batches_to_series(
batches: RecordBatches,
series: &mut Vec<HashMap<String, String>>,
table_name: &str,
) -> Result<()> {
for batch in batches.iter() {
for row in batch.rows() {
let mut element: HashMap<String, String> = row
.iter()
.enumerate()
.map(|(idx, column)| {
let column_name = batch.schema.column_name_by_index(idx);
(column_name.to_string(), column.to_string())
})
.collect();
element.insert("__name__".to_string(), table_name.to_string());
series.push(element);
}
}
Ok(())
}
/// Retrieve labels name from record batches
fn record_batches_to_labels_name(
batches: RecordBatches,
@@ -675,12 +721,13 @@ fn record_batches_to_labels_name(
pub(crate) fn retrieve_metric_name_and_result_type(
promql: &str,
) -> Option<(String, Option<ValueType>)> {
let promql_expr = promql_parser::parser::parse(promql).ok()?;
let metric_name = promql_expr_to_metric_name(&promql_expr)?;
let result_type = Some(promql_expr.value_type());
) -> Result<(Option<String>, ValueType)> {
let promql_expr = promql_parser::parser::parse(promql)
.map_err(|reason| InvalidQuerySnafu { reason }.build())?;
let metric_name = promql_expr_to_metric_name(&promql_expr);
let result_type = promql_expr.value_type();
Some((metric_name, result_type))
Ok((metric_name, result_type))
}
fn promql_expr_to_metric_name(expr: &PromqlExpr) -> Option<String> {
@@ -803,14 +850,12 @@ async fn retrieve_label_values_from_record_batch(
ConcreteDataType::String(_) => {}
_ => return Ok(()),
}
for batch in batches.iter() {
let label_column = batch
.column(label_col_idx)
.as_any()
.downcast_ref::<StringVector>()
.unwrap();
for row_index in 0..batch.num_rows() {
if let Some(label_value) = label_column.get_data(row_index) {
labels_values.insert(label_value.to_string());
@@ -820,3 +865,57 @@ async fn retrieve_label_values_from_record_batch(
Ok(())
}
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
pub struct SeriesQuery {
start: Option<String>,
end: Option<String>,
#[serde(flatten)]
matches: Matches,
db: Option<String>,
}
#[axum_macros::debug_handler]
pub async fn series_query(
State(handler): State<PromHandlerRef>,
Query(params): Query<SeriesQuery>,
Form(form_params): Form<SeriesQuery>,
) -> Json<PromJsonResponse> {
let mut queries: Vec<String> = params.matches.0;
if queries.is_empty() {
queries = form_params.matches.0;
}
if queries.is_empty() {
return PromJsonResponse::error("Unsupported", "match[] parameter is required");
}
let start = params
.start
.or(form_params.start)
.unwrap_or_else(yesterday_rfc3339);
let end = params
.end
.or(form_params.end)
.unwrap_or_else(current_time_rfc3339);
let db = &params.db.unwrap_or(DEFAULT_SCHEMA_NAME.to_string());
let (catalog, schema) = super::parse_catalog_and_schema_from_client_database_name(db);
let query_ctx = Arc::new(QueryContext::with(catalog, schema));
let mut series = Vec::new();
for query in queries {
let table_name = query.clone();
let prom_query = PromQuery {
query,
start: start.clone(),
end: end.clone(),
// TODO: find a better value for step
step: DEFAULT_LOOKBACK_STRING.to_string(),
};
let result = handler.do_query(&prom_query, query_ctx.clone()).await;
if let Err(err) = retrieve_series_from_query_result(result, &mut series, &table_name).await
{
return PromJsonResponse::error(err.status_code().to_string(), err.to_string());
}
}
PromJsonResponse::success(PromResponse::Series(series))
}

View File

@@ -15,13 +15,13 @@
//! Prometheus protocol support:
//! handles Prometheus remote_write and remote_read logic
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::collections::{BTreeMap, HashMap};
use std::hash::{Hash, Hasher};
use api::prometheus::remote::label_matcher::Type as MatcherType;
use api::prometheus::remote::{Label, Query, Sample, TimeSeries, WriteRequest};
use api::v1::column::SemanticType;
use api::v1::{column, Column, ColumnDataType, InsertRequest as GrpcInsertRequest, InsertRequests};
use api::v1::{InsertRequest as GrpcInsertRequest, InsertRequests};
use common_grpc::writer::{LinesWriter, Precision};
use common_recordbatch::{RecordBatch, RecordBatches};
use common_time::timestamp::TimeUnit;
use datatypes::prelude::{ConcreteDataType, Value};
@@ -284,83 +284,68 @@ fn recordbatch_to_timeseries(table: &str, recordbatch: RecordBatch) -> Result<Ve
}
pub fn to_grpc_insert_requests(request: WriteRequest) -> Result<(InsertRequests, usize)> {
let (inserts, samples_counts) = itertools::process_results(
request.timeseries.into_iter().map(to_grpc_insert_request),
|x| x.unzip::<_, _, Vec<_>, Vec<_>>(),
)?;
Ok((
InsertRequests { inserts },
samples_counts.into_iter().sum::<usize>(),
))
}
let mut writers: HashMap<String, LinesWriter> = HashMap::new();
for timeseries in &request.timeseries {
let table_name = timeseries
.labels
.iter()
.find(|label| {
// The metric name is a special label
label.name == METRIC_NAME_LABEL
})
.context(error::InvalidPromRemoteRequestSnafu {
msg: "missing '__name__' label in timeseries",
})?
.value
.clone();
fn to_grpc_insert_request(timeseries: TimeSeries) -> Result<(GrpcInsertRequest, usize)> {
let samples_count = timeseries.samples.len();
let writer = writers
.entry(table_name)
.or_insert_with(|| LinesWriter::with_lines(16));
// For each sample
for sample in &timeseries.samples {
// Insert labels first.
for label in &timeseries.labels {
// The metric name is a special label
if label.name == METRIC_NAME_LABEL {
continue;
}
// TODO(dennis): save exemplars into a column
let labels = timeseries.labels;
let samples = timeseries.samples;
writer
.write_tag(&label.name, &label.value)
.context(error::PromSeriesWriteSnafu)?;
}
// Insert sample timestamp.
writer
.write_ts(
TIMESTAMP_COLUMN_NAME,
(sample.timestamp, Precision::Millisecond),
)
.context(error::PromSeriesWriteSnafu)?;
// Insert sample value.
writer
.write_f64(FIELD_COLUMN_NAME, sample.value)
.context(error::PromSeriesWriteSnafu)?;
let row_count = samples.len();
let mut columns = Vec::with_capacity(2 + labels.len());
let ts_column = Column {
column_name: TIMESTAMP_COLUMN_NAME.to_string(),
values: Some(column::Values {
ts_millisecond_values: samples.iter().map(|x| x.timestamp).collect(),
..Default::default()
}),
semantic_type: SemanticType::Timestamp as i32,
datatype: ColumnDataType::TimestampMillisecond as i32,
..Default::default()
};
columns.push(ts_column);
let field_column = Column {
column_name: FIELD_COLUMN_NAME.to_string(),
values: Some(column::Values {
f64_values: samples.iter().map(|x| x.value).collect(),
..Default::default()
}),
semantic_type: SemanticType::Field as i32,
datatype: ColumnDataType::Float64 as i32,
..Default::default()
};
columns.push(field_column);
let mut table_name = None;
for label in labels {
let tagk = label.name;
let tagv = label.value;
// The metric name is a special label
if tagk == METRIC_NAME_LABEL {
table_name = Some(tagv);
continue;
writer.commit();
}
columns.push(Column {
column_name: tagk.to_string(),
values: Some(column::Values {
string_values: std::iter::repeat(tagv).take(row_count).collect(),
..Default::default()
}),
semantic_type: SemanticType::Tag as i32,
datatype: ColumnDataType::String as i32,
..Default::default()
});
}
let request = GrpcInsertRequest {
table_name: table_name.context(error::InvalidPromRemoteRequestSnafu {
msg: "missing '__name__' label in timeseries",
})?,
region_number: 0,
columns,
row_count: row_count as u32,
};
Ok((request, samples_count))
let mut sample_counts = 0;
let inserts = writers
.into_iter()
.map(|(table_name, writer)| {
let (columns, row_count) = writer.finish();
sample_counts += row_count as usize;
GrpcInsertRequest {
table_name,
region_number: 0,
columns,
row_count,
}
})
.collect();
Ok((InsertRequests { inserts }, sample_counts))
}
#[inline]
@@ -516,13 +501,16 @@ mod tests {
..Default::default()
};
let exprs = to_grpc_insert_requests(write_request).unwrap().0.inserts;
let mut exprs = to_grpc_insert_requests(write_request).unwrap().0.inserts;
exprs.sort_unstable_by(|l, r| l.table_name.cmp(&r.table_name));
assert_eq!(3, exprs.len());
assert_eq!("metric1", exprs[0].table_name);
assert_eq!("metric2", exprs[1].table_name);
assert_eq!("metric3", exprs[2].table_name);
let expr = exprs.get(0).unwrap();
let expr = exprs.get_mut(0).unwrap();
expr.columns
.sort_unstable_by(|l, r| l.column_name.cmp(&r.column_name));
let columns = &expr.columns;
let row_count = expr.row_count;
@@ -548,7 +536,9 @@ mod tests {
vec!["spark", "spark"]
);
let expr = exprs.get(1).unwrap();
let expr = exprs.get_mut(1).unwrap();
expr.columns
.sort_unstable_by(|l, r| l.column_name.cmp(&r.column_name));
let columns = &expr.columns;
let row_count = expr.row_count;
@@ -568,18 +558,20 @@ mod tests {
vec![3.0, 4.0]
);
assert_eq!(columns[2].column_name, "instance");
assert_eq!(columns[2].column_name, "idc");
assert_eq!(
columns[2].values.as_ref().unwrap().string_values,
vec!["test_host1", "test_host1"]
);
assert_eq!(columns[3].column_name, "idc");
assert_eq!(
columns[3].values.as_ref().unwrap().string_values,
vec!["z001", "z001"]
);
assert_eq!(columns[3].column_name, "instance");
assert_eq!(
columns[3].values.as_ref().unwrap().string_values,
vec!["test_host1", "test_host1"]
);
let expr = exprs.get(2).unwrap();
let expr = exprs.get_mut(2).unwrap();
expr.columns
.sort_unstable_by(|l, r| l.column_name.cmp(&r.column_name));
let columns = &expr.columns;
let row_count = expr.row_count;
@@ -587,27 +579,27 @@ mod tests {
assert_eq!(3, row_count);
assert_eq!(columns.len(), 4);
assert_eq!(columns[0].column_name, TIMESTAMP_COLUMN_NAME);
assert_eq!(columns[0].column_name, "app");
assert_eq!(
columns[0].values.as_ref().unwrap().ts_millisecond_values,
columns[0].values.as_ref().unwrap().string_values,
vec!["biz", "biz", "biz"]
);
assert_eq!(columns[1].column_name, TIMESTAMP_COLUMN_NAME);
assert_eq!(
columns[1].values.as_ref().unwrap().ts_millisecond_values,
vec![1000, 2000, 3000]
);
assert_eq!(columns[1].column_name, FIELD_COLUMN_NAME);
assert_eq!(columns[2].column_name, FIELD_COLUMN_NAME);
assert_eq!(
columns[1].values.as_ref().unwrap().f64_values,
columns[2].values.as_ref().unwrap().f64_values,
vec![5.0, 6.0, 7.0]
);
assert_eq!(columns[2].column_name, "idc");
assert_eq!(
columns[2].values.as_ref().unwrap().string_values,
vec!["z002", "z002", "z002"]
);
assert_eq!(columns[3].column_name, "app");
assert_eq!(columns[3].column_name, "idc");
assert_eq!(
columns[3].values.as_ref().unwrap().string_values,
vec!["biz", "biz", "biz"]
vec!["z002", "z002", "z002"]
);
}

View File

@@ -43,7 +43,6 @@ pub trait SqlQueryHandler {
query_ctx: QueryContextRef,
) -> Vec<std::result::Result<Output, Self::Error>>;
// TODO(LFC): revisit this for mysql prepared statement
async fn do_describe(
&self,
stmt: Statement,

View File

@@ -98,13 +98,13 @@ pub enum Error {
#[snafu(display("Invalid default constraint, column: {}, source: {}", column, source))]
InvalidDefault {
column: String,
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},
#[snafu(display("Failed to serialize column default constraint, source: {}", source))]
SerializeColumnDefaultConstraint {
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},
@@ -113,7 +113,7 @@ pub enum Error {
source
))]
ConvertToGrpcDataType {
#[snafu(backtrace)]
location: Location,
source: api::error::Error,
},

View File

@@ -41,7 +41,7 @@ impl BenchContext {
batch_size,
..Default::default()
};
let iter = self.memtable.iter(&iter_ctx).unwrap();
let iter = self.memtable.iter(iter_ctx).unwrap();
for batch in iter {
batch.unwrap();
read_count += batch_size;

View File

@@ -62,7 +62,6 @@ impl RegionDescBuilder {
row_key: self.key_builder.build().unwrap(),
default_cf: self.default_cf_builder.build().unwrap(),
extra_cfs: Vec::new(),
compaction_time_window: None,
}
}

View File

@@ -220,7 +220,9 @@ impl ChunkReaderBuilder {
.batch_size(self.iter_ctx.batch_size);
for mem in &self.memtables {
let iter = mem.iter(&self.iter_ctx)?;
let mut iter_ctx = self.iter_ctx.clone();
iter_ctx.time_range = Some(*time_range);
let iter = mem.iter(iter_ctx)?;
reader_builder = reader_builder.push_batch_iter(iter);
}

View File

@@ -120,6 +120,7 @@ impl<S: LogStore> Picker for SimplePicker<S> {
}
let ctx = &PickerContext::with(req.compaction_time_window);
for level_num in 0..levels.level_num() {
let level = levels.level(level_num as u8);
let (compaction_time_window, outputs) = self.strategy.pick(ctx, level);
@@ -130,8 +131,8 @@ impl<S: LogStore> Picker for SimplePicker<S> {
}
debug!(
"Found SST files to compact {:?} on level: {}",
outputs, level_num
"Found SST files to compact {:?} on level: {}, compaction window: {:?}",
outputs, level_num, compaction_time_window,
);
return Ok(Some(CompactionTaskImpl {
schema: req.schema(),

View File

@@ -47,19 +47,24 @@ impl Strategy for SimpleTimeWindowStrategy {
if files.is_empty() {
return (None, vec![]);
}
let time_bucket = ctx
.compaction_time_window()
.unwrap_or_else(|| infer_time_bucket(&files));
let buckets = calculate_time_buckets(time_bucket, &files);
debug!("File bucket:{}, file groups: {:?}", time_bucket, buckets);
let time_window = ctx.compaction_time_window().unwrap_or_else(|| {
let inferred = infer_time_bucket(&files);
debug!(
"Compaction window is not present, inferring from files: {:?}",
inferred
);
inferred
});
let buckets = calculate_time_buckets(time_window, &files);
debug!("File bucket:{}, file groups: {:?}", time_window, buckets);
(
Some(time_bucket),
Some(time_window),
buckets
.into_iter()
.map(|(bound, files)| CompactionOutput {
output_level: 1,
bucket_bound: bound,
bucket: time_bucket,
bucket: time_window,
inputs: files,
})
.collect(),

View File

@@ -102,7 +102,6 @@ impl<S: LogStore> CompactionTaskImpl<S> {
}
/// Writes updated SST info into manifest.
// TODO(etolbakov): we are not persisting inferred compaction_time_window (#1083)[https://github.com/GreptimeTeam/greptimedb/pull/1083]
async fn write_manifest_and_apply(
&self,
output: HashSet<FileMeta>,
@@ -116,6 +115,7 @@ impl<S: LogStore> CompactionTaskImpl<S> {
flushed_sequence: None,
files_to_add: Vec::from_iter(output.into_iter()),
files_to_remove: Vec::from_iter(input.into_iter()),
compaction_time_window: self.compaction_time_window,
};
debug!(
"Compacted region: {}, region edit: {:?}",
@@ -151,7 +151,10 @@ impl<S: LogStore> CompactionTask for CompactionTaskImpl<S> {
let input_ids = compacted.iter().map(|f| f.file_id).collect::<Vec<_>>();
let output_ids = output.iter().map(|f| f.file_id).collect::<Vec<_>>();
info!("Compacting SST files, input: {input_ids:?}, output: {output_ids:?}");
info!(
"Compacting SST files, input: {:?}, output: {:?}, window: {:?}",
input_ids, output_ids, self.compaction_time_window
);
self.write_manifest_and_apply(output, compacted)
.await
.map_err(|e| {

View File

@@ -217,7 +217,7 @@ mod tests {
seq.fetch_add(1, Ordering::Relaxed);
}
let iter = memtable.iter(&IterContext::default()).unwrap();
let iter = memtable.iter(IterContext::default()).unwrap();
let file_path = sst_file_id.as_parquet();
let writer = ParquetWriter::new(&file_path, Source::Iter(iter), object_store.clone());

View File

@@ -28,7 +28,7 @@ use store_api::storage::{
};
use crate::compaction::CompactionSchedulerRef;
use crate::config::{EngineConfig, DEFAULT_REGION_WRITE_BUFFER_SIZE};
use crate::config::EngineConfig;
use crate::error::{self, Error, Result};
use crate::file_purger::{FilePurgeHandler, FilePurgerRef};
use crate::flush::{
@@ -89,7 +89,7 @@ impl<S: LogStore> StorageEngine for EngineImpl<S> {
async fn drop_region(&self, _ctx: &EngineContext, region: Self::Region) -> Result<()> {
region.drop_region().await?;
self.inner.remove_reigon(region.name());
self.inner.remove_region(region.name());
Ok(())
}
@@ -395,7 +395,6 @@ impl<S: LogStore> EngineInner<S> {
name,
&self.config,
opts.ttl,
opts.compaction_time_window,
)
.await?;
@@ -441,7 +440,6 @@ impl<S: LogStore> EngineInner<S> {
&region_name,
&self.config,
opts.ttl,
opts.compaction_time_window,
)
.await?;
@@ -462,7 +460,7 @@ impl<S: LogStore> EngineInner<S> {
self.regions.get_region(name)
}
fn remove_reigon(&self, name: &str) {
fn remove_region(&self, name: &str) {
self.regions.remove(name)
}
@@ -473,7 +471,6 @@ impl<S: LogStore> EngineInner<S> {
region_name: &str,
config: &EngineConfig,
region_ttl: Option<Duration>,
compaction_time_window: Option<i64>,
) -> Result<StoreConfig<S>> {
let parent_dir = util::normalize_dir(parent_dir);
@@ -504,9 +501,8 @@ impl<S: LogStore> EngineInner<S> {
engine_config: self.config.clone(),
file_purger: self.file_purger.clone(),
ttl,
compaction_time_window,
write_buffer_size: write_buffer_size
.unwrap_or(DEFAULT_REGION_WRITE_BUFFER_SIZE.as_bytes() as usize),
.unwrap_or(self.config.region_write_buffer_size.as_bytes() as usize),
})
}
@@ -553,7 +549,7 @@ mod tests {
log_file_dir: &TempDir,
region_name: &str,
region_id: u64,
ctx: &EngineContext,
config: EngineConfig,
) -> (TestEngine, TestRegion) {
let log_file_dir_path = log_file_dir.path().to_str().unwrap();
let log_store = log_store_util::create_tmp_local_file_log_store(log_file_dir_path).await;
@@ -564,8 +560,6 @@ mod tests {
builder.root(&store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
let config = EngineConfig::default();
let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let engine = EngineImpl::new(
@@ -584,7 +578,7 @@ mod tests {
.build();
let region = engine
.create_region(ctx, desc, &CreateOptions::default())
.create_region(&EngineContext::default(), desc, &CreateOptions::default())
.await
.unwrap();
@@ -606,18 +600,38 @@ mod tests {
let region_name = "region-0";
let region_id = 123456;
let ctx = EngineContext::default();
let config = EngineConfig::default();
let (engine, region) =
create_engine_and_region(&dir, &log_file_dir, region_name, region_id, &ctx).await;
create_engine_and_region(&dir, &log_file_dir, region_name, region_id, config).await;
assert_eq!(region_name, region.name());
let ctx = EngineContext::default();
let region2 = engine.get_region(&ctx, region_name).unwrap().unwrap();
assert_eq!(region_name, region2.name());
assert!(engine.get_region(&ctx, "no such region").unwrap().is_none());
}
#[tokio::test]
async fn test_create_region_with_buffer_size() {
let dir = create_temp_dir("test_buffer_size");
let log_file_dir = create_temp_dir("test_buffer_wal");
let region_name = "region-0";
let region_id = 123456;
let mut config = EngineConfig::default();
let expect_buffer_size = config.region_write_buffer_size / 2;
config.region_write_buffer_size = expect_buffer_size;
let (_engine, region) =
create_engine_and_region(&dir, &log_file_dir, region_name, region_id, config).await;
assert_eq!(
expect_buffer_size.as_bytes() as usize,
region.write_buffer_size().await
);
}
#[tokio::test]
async fn test_drop_region() {
common_telemetry::init_default_ut_logging();
@@ -626,10 +640,10 @@ mod tests {
let region_name = "test_region";
let region_id = 123456;
let ctx = EngineContext::default();
let config = EngineConfig::default();
let (engine, region) =
create_engine_and_region(&dir, &log_file_dir, region_name, region_id, &ctx).await;
create_engine_and_region(&dir, &log_file_dir, region_name, region_id, config).await;
assert_eq!(region_name, region.name());
@@ -648,6 +662,7 @@ mod tests {
// Flush memtable to sst.
region.flush(&FlushContext::default()).await.unwrap();
+let ctx = EngineContext::default();
engine
.close_region(&ctx, region.name(), &CloseOptions::default())
.await


@@ -38,7 +38,7 @@ pub enum Error {
#[snafu(display("Invalid region descriptor, region: {}, source: {}", region, source))]
InvalidRegionDesc {
region: String,
-#[snafu(backtrace)]
+location: Location,
source: MetadataError,
},
@@ -53,7 +53,7 @@ pub enum Error {
#[snafu(display("Failed to write to buffer, source: {}", source))]
WriteBuffer {
-#[snafu(backtrace)]
+location: Location,
source: common_datasource::error::Error,
},
@@ -147,7 +147,7 @@ pub enum Error {
))]
WriteWal {
region_id: RegionId,
-#[snafu(backtrace)]
+location: Location,
source: BoxedError,
},
@@ -218,7 +218,7 @@ pub enum Error {
#[snafu(display("Failed to read WAL, region_id: {}, source: {}", region_id, source))]
ReadWal {
region_id: RegionId,
-#[snafu(backtrace)]
+location: Location,
source: BoxedError,
},
@@ -229,7 +229,7 @@ pub enum Error {
))]
MarkWalObsolete {
region_id: u64,
-#[snafu(backtrace)]
+location: Location,
source: BoxedError,
},
@@ -265,14 +265,14 @@ pub enum Error {
#[snafu(display("Failed to convert store schema, file: {}, source: {}", file, source))]
ConvertStoreSchema {
file: String,
-#[snafu(backtrace)]
+location: Location,
source: MetadataError,
},
#[snafu(display("Invalid raw region metadata, region: {}, source: {}", region, source))]
InvalidRawRegion {
region: String,
-#[snafu(backtrace)]
+location: Location,
source: MetadataError,
},
@@ -281,13 +281,13 @@ pub enum Error {
#[snafu(display("Invalid projection, source: {}", source))]
InvalidProjection {
-#[snafu(backtrace)]
+location: Location,
source: MetadataError,
},
#[snafu(display("Failed to push data to batch builder, source: {}", source))]
PushBatch {
-#[snafu(backtrace)]
+location: Location,
source: datatypes::error::Error,
},
@@ -297,19 +297,19 @@ pub enum Error {
#[snafu(display("Failed to filter column {}, source: {}", name, source))]
FilterColumn {
name: String,
-#[snafu(backtrace)]
+location: Location,
source: datatypes::error::Error,
},
#[snafu(display("Invalid alter request, source: {}", source))]
InvalidAlterRequest {
-#[snafu(backtrace)]
+location: Location,
source: MetadataError,
},
#[snafu(display("Failed to alter metadata, source: {}", source))]
AlterMetadata {
-#[snafu(backtrace)]
+location: Location,
source: MetadataError,
},
@@ -320,7 +320,7 @@ pub enum Error {
))]
CreateDefault {
name: String,
-#[snafu(backtrace)]
+location: Location,
source: datatypes::error::Error,
},
@@ -353,7 +353,7 @@ pub enum Error {
))]
CreateDefaultToRead {
column: String,
-#[snafu(backtrace)]
+location: Location,
source: datatypes::error::Error,
},
@@ -367,7 +367,7 @@ pub enum Error {
))]
ConvertChunk {
name: String,
-#[snafu(backtrace)]
+location: Location,
source: datatypes::error::Error,
},
@@ -376,7 +376,7 @@ pub enum Error {
#[snafu(display("Failed to create record batch for write batch, source:{}", source))]
CreateRecordBatch {
-#[snafu(backtrace)]
+location: Location,
source: common_recordbatch::error::Error,
},
@@ -451,13 +451,13 @@ pub enum Error {
#[snafu(display("Failed to start manifest gc task: {}", source))]
StartManifestGcTask {
-#[snafu(backtrace)]
+location: Location,
source: RuntimeError,
},
#[snafu(display("Failed to stop manifest gc task: {}", source))]
StopManifestGcTask {
-#[snafu(backtrace)]
+location: Location,
source: RuntimeError,
},
@@ -475,7 +475,7 @@ pub enum Error {
#[snafu(display("Failed to calculate SST expire time, source: {}", source))]
TtlCalculation {
-#[snafu(backtrace)]
+location: Location,
source: common_time::error::Error,
},
@@ -501,13 +501,13 @@ pub enum Error {
#[snafu(display("Failed to start picking task for flush: {}", source))]
StartPickTask {
-#[snafu(backtrace)]
+location: Location,
source: RuntimeError,
},
#[snafu(display("Failed to stop picking task for flush: {}", source))]
StopPickTask {
-#[snafu(backtrace)]
+location: Location,
source: RuntimeError,
},
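
(Not part of the diff: a minimal sketch of the error-variant pattern the hunks above converge on, assuming snafu 0.7, where a field of type Location is filled in implicitly by the generated context selector. The WriteBuffer variant name mirrors the diff, but the std::io::Error source type and the write_all helper are illustrative only.)

use snafu::{Location, ResultExt, Snafu};

#[derive(Debug, Snafu)]
pub enum Error {
    #[snafu(display("Failed to write to buffer, source: {}", source))]
    WriteBuffer {
        // Was `#[snafu(backtrace)]` on the source field; the call-site
        // location is now captured in an explicit `Location` field.
        location: Location,
        source: std::io::Error,
    },
}

fn write_all(buf: &mut Vec<u8>, data: &[u8]) -> Result<(), Error> {
    use std::io::Write;
    // No location argument is passed; snafu fills it in at this call site.
    buf.write_all(data).context(WriteBufferSnafu)
}

fn main() {
    let mut buf = Vec::new();
    write_all(&mut buf, b"hello").unwrap();
    assert_eq!(buf, b"hello");
}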


@@ -143,7 +143,7 @@ mod tests {
&[(Some(1), Some(1)), (Some(2), Some(2))],
);
-let iter = memtable.iter(&IterContext::default()).unwrap();
+let iter = memtable.iter(IterContext::default()).unwrap();
let sst_path = "table1";
let layer = Arc::new(FsAccessLayer::new(sst_path, os.clone()));
let sst_info = layer

Some files were not shown because too many files have changed in this diff.