Compare commits


32 Commits

Author SHA1 Message Date
Ruihang Xia
1bd53567b4 try to run on self-hosted runner
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-13 16:01:50 +08:00
Weny Xu
803940cfa4 feat: enable azblob tests (#1765)
* feat: enable azblob tests

* fix: add missing arg
2023-06-13 07:44:57 +00:00
Weny Xu
420ae054b3 chore: add debug log for heartbeat (#1770) 2023-06-13 07:43:26 +00:00
Lei, HUANG
0f1e061f24 fix: compile issue on develop and workaround to fix failing tests cau… (#1771)
* fix: compile issue on develop and workaround to fix failing tests caused by logstore file lock

* Apply suggestions from code review

Co-authored-by: JeremyHi <jiachun_feng@proton.me>

---------

Co-authored-by: JeremyHi <jiachun_feng@proton.me>
2023-06-13 07:30:16 +00:00
Lei, HUANG
7961de25ad feat: persist compaction time window (#1757)
* feat: persist compaction time window

* refactor: remove useless compaction window fields

* chore: revert some useless change

* fix: some CR comments

* fix: comment out unstable sqlness test

* revert commented sqlness
2023-06-13 10:15:42 +08:00
Lei, HUANG
f7d98e533b chore: fix compaction caused race condition (#1759)
* fix: set max_files_in_l0 in unit tests to avoid compaction

* refactor: pass while EngineConfig

* fix: comment out unstable sqlness test

* revert commented sqlness
2023-06-12 11:19:42 +00:00
Ruihang Xia
b540d640cf fix: unstable order with union operation (#1763)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-12 18:16:24 +08:00
Eugene Tolbakov
51a4d660b7 feat(to_unixtime): add timestamp types as arguments (#1632)
* feat(to_unixtime): add timestamp types as arguments

* feat(to_unixtime): change the return type

* feat(to_unixtime): address code review issues

* feat(to_unixtime): fix fmt issue
2023-06-12 17:21:49 +08:00
Ruihang Xia
1b2381502e fix: bring EnforceSorting rule forward (#1754)
* fix: bring EnforceSorting rule forward

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove duplicated rules

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* wrap remove logic into a method

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-12 07:29:08 +00:00
Yingwen
0e937be3f5 fix(storage): Use region_write_buffer_size as default value (#1760) 2023-06-12 15:05:17 +08:00
Weny Xu
564c183607 chore: make MetaKvBackend public (#1761) 2023-06-12 14:13:26 +08:00
Ruihang Xia
8c78368374 refactor: replace #[snafu(backtrace)] with Location (#1753)
* remove snafu backtrace

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-12 11:55:33 +08:00
Lei, HUANG
67c16dd631 feat: optimize some parquet writer parameter (#1758) 2023-06-12 11:46:45 +08:00
Lei, HUANG
ddcee052b2 fix: order by optimization (#1748)
* add some debug log

* fix: use lazy parquet reader in MitoTable::scan_to_stream to avoid IO in plan stage

* fix: unit tests

* fix: order-by optimization

* add some tests

* fix: move metric names to metrics.rs

* fix: some cr comments
2023-06-12 11:45:43 +08:00
王听正
7efcf868d5 refactor: Remove MySQL related options from Datanode (#1756)
* refactor: Remove MySQL related options from Datanode

remove mysql_addr and mysql_runtime_size in datanode.rs, remove command line argument mysql_addr in cmd/src/datanode.rs

#1739

* feat: remove --mysql-addr from command line

in pre commit, sqlness can not find --mysql-addrr, because we remove it

issue#1739

* refactor: remove --mysql-addr from command line

in pre commit, sqlness can not find --mysql-addrr, because we remove it

issue#1739
2023-06-12 11:00:24 +08:00
dennis zhuang
f08f726bec test: s3 manifest (#1755)
* feat: change default manifest options

* test: s3 manifest

* feat: revert checkpoint_margin to 10

* Update src/object-store/src/test_util.rs

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

---------

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2023-06-09 10:28:41 +00:00
Ning Sun
7437820bdc ci: correct data type for input and event check (#1752) 2023-06-09 13:59:56 +08:00
Lei, HUANG
910c950717 fix: jemalloc error does not implement Error (#1747) 2023-06-09 04:00:50 +00:00
Zou Wei
f91cd250f8 feat:make version() show greptime info. (#1749)
* feat:impl get_version() to return greptime info.

* fix: refactor test case.
2023-06-09 11:38:52 +08:00
Yingwen
115d9eea8d chore: Log version and arguments (#1744) 2023-06-09 11:38:08 +08:00
Ning Sun
bc8f236806 ci: fix using env in job.if context (#1751) 2023-06-09 11:28:29 +08:00
Yiran
fdbda51c25 chore: update document links in README.md (#1745) 2023-06-09 10:05:24 +08:00
Ning Sun
e184826353 ci: allow triggering nightly release manually (#1746)
ci: allow triggering nightly manually
2023-06-09 10:04:44 +08:00
Yingwen
5b8e54e60e feat: Add HTTP API for cpu profiling (#1694)
* chore: print source error in mem-prof

* feat(common-pprof): add pprof crate

* feat(servers): Add pprof handler to router

refactor the mem_prof handler to avoid checking feature while
registering router

* feat(servers): pprof handler support different output type

* docs(common-pprof): Add readme

* feat(common-pprof): Build guard using code in pprof-rs's example

* feat(common-pprof): use prost

* feat: don't add timeout to perf api

* feat: add feature pprof

* feat: update readme

* test: fix tests

* feat: close region in TestBase

* feat(pprof): addres comments
2023-06-07 15:25:16 +08:00
Lei, HUANG
8cda1635cc feat: make jemalloc the default allocator (#1733)
* feat: add jemalloc metrics

* fix: dep format
2023-06-06 12:11:22 +00:00
Lei, HUANG
f63ddb57c3 fix: parquet time range predicate panic (#1735)
fix: parquet reader should use store schema to build time range predicate
2023-06-06 19:11:45 +08:00
fys
d2a8fd9890 feat: add route admin api in metasrv (#1734)
* feat: add route admin api in metasrv

* fix: add license
2023-06-06 18:00:02 +08:00
LFC
91026a6820 chore: clean up some of my todos (#1723)
* chore: clean up some of my todos

* fix: ci
2023-06-06 17:25:04 +08:00
Ruihang Xia
7a60bfec2a fix: empty result type on prom query endpoint (#1732)
* adjust return type

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add test case

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-06 15:40:54 +08:00
Niwaka
a103614fd2 feat: support /api/v1/series for Prometheus (#1620)
* feat: support /api/v1/series for Prometheus

* chore: error handling

* feat: update tests
2023-06-06 10:29:16 +08:00
Yingwen
1b4976b077 feat: Adds some metrics for write path and flush (#1726)
* feat: more metrics

* feat: Add preprocess elapsed

* chore(storage): rename metric

* test: fix tests
2023-06-05 21:35:44 +08:00
Lei, HUANG
166fb8871e chore: bump greptimedb version 0.4.0 (#1724) 2023-06-05 18:41:53 +08:00
136 changed files with 2590 additions and 966 deletions
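Among the entries above, #1726 adds metrics to the write path and flush. As a rough illustration of the metrics-crate macro style this codebase already uses (compare the gauge!("app_version", ...) call in the main-binary hunk near the end of this compare), here is a hedged sketch; the function and the metric names are made up for illustration and are not the ones introduced by that PR:

use std::time::Instant;

use metrics::{histogram, increment_counter};

// Hypothetical write-path instrumentation; metric names are illustrative only.
fn write_batch(rows: usize) {
    let start = Instant::now();
    // ... perform the actual write here ...
    increment_counter!("storage.write.requests");
    histogram!("storage.write.rows", rows as f64);
    histogram!("storage.write.elapsed_seconds", start.elapsed().as_secs_f64());
}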

View File

@@ -7,20 +7,29 @@ on:
     - cron: '0 0 * * 1'
   # Mannually trigger only builds binaries.
   workflow_dispatch:
+    inputs:
+      dry_run:
+        description: 'Skip docker push and release steps'
+        type: boolean
+        default: true
+      skip_test:
+        description: 'Do not run tests during build'
+        type: boolean
+        default: false

 name: Release

 env:
   RUST_TOOLCHAIN: nightly-2023-05-03
-  SCHEDULED_BUILD_VERSION_PREFIX: v0.3.0
+  SCHEDULED_BUILD_VERSION_PREFIX: v0.4.0
   SCHEDULED_PERIOD: nightly
   CARGO_PROFILE: nightly
   # Controls whether to run tests, include unit-test, integration-test and sqlness.
-  DISABLE_RUN_TESTS: false
+  DISABLE_RUN_TESTS: ${{ inputs.skip_test || false }}

 jobs:
   build-macos:
@@ -30,22 +39,22 @@ jobs:
       # The file format is greptime-<os>-<arch>
       include:
         - arch: aarch64-apple-darwin
-          os: macos-latest
+          os: self-hosted
           file: greptime-darwin-arm64
           continue-on-error: false
           opts: "-F servers/dashboard"
         - arch: x86_64-apple-darwin
-          os: macos-latest
+          os: self-hosted
           file: greptime-darwin-amd64
           continue-on-error: false
           opts: "-F servers/dashboard"
         - arch: aarch64-apple-darwin
-          os: macos-latest
+          os: self-hosted
           file: greptime-darwin-arm64-pyo3
           continue-on-error: false
           opts: "-F pyo3_backend,servers/dashboard"
         - arch: x86_64-apple-darwin
-          os: macos-latest
+          os: self-hosted
           file: greptime-darwin-amd64-pyo3
           continue-on-error: false
           opts: "-F pyo3_backend,servers/dashboard"
@@ -281,7 +290,7 @@ jobs:
     name: Build docker image
     needs: [build-linux, build-macos]
     runs-on: ubuntu-latest
-    if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
+    if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
     steps:
       - name: Checkout sources
         uses: actions/checkout@v3
@@ -294,7 +303,7 @@ jobs:
       - name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
         shell: bash
-        if: github.event_name == 'schedule'
+        if: github.event_name != 'push'
         run: |
           buildTime=`date "+%Y%m%d"`
           SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
@@ -302,7 +311,7 @@ jobs:
       - name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
         shell: bash
-        if: github.event_name != 'schedule'
+        if: github.event_name == 'push'
         run: |
           VERSION=${{ github.ref_name }}
           echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
@@ -367,7 +376,7 @@ jobs:
     # Release artifacts only when all the artifacts are built successfully.
     needs: [build-linux, build-macos, docker]
     runs-on: ubuntu-latest
-    if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
+    if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
     steps:
       - name: Checkout sources
         uses: actions/checkout@v3
@@ -377,7 +386,7 @@ jobs:
       - name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-${SCHEDULED_PERIOD}-YYYYMMDD, like v0.2.0-nigthly-20230313.
         shell: bash
-        if: github.event_name == 'schedule'
+        if: github.event_name != 'push'
         run: |
           buildTime=`date "+%Y%m%d"`
           SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-${{ env.SCHEDULED_PERIOD }}-$buildTime
@@ -395,13 +404,13 @@ jobs:
           fi
       - name: Create scheduled build git tag
-        if: github.event_name == 'schedule'
+        if: github.event_name != 'push'
         run: |
           git tag ${{ env.SCHEDULED_BUILD_VERSION }}
       - name: Publish scheduled release # configure the different release title and tags.
         uses: ncipollo/release-action@v1
-        if: github.event_name == 'schedule'
+        if: github.event_name != 'push'
         with:
           name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
           prerelease: ${{ env.prerelease }}
@@ -413,7 +422,7 @@ jobs:
       - name: Publish release
         uses: ncipollo/release-action@v1
-        if: github.event_name != 'schedule'
+        if: github.event_name == 'push'
         with:
           name: "${{ github.ref_name }}"
           prerelease: ${{ env.prerelease }}
@@ -426,7 +435,7 @@ jobs:
     name: Push docker image to alibaba cloud container registry
     needs: [docker]
     runs-on: ubuntu-latest
-    if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
+    if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
     continue-on-error: true
     steps:
       - name: Checkout sources
@@ -447,7 +456,7 @@ jobs:
       - name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
         shell: bash
-        if: github.event_name == 'schedule'
+        if: github.event_name != 'push'
         run: |
           buildTime=`date "+%Y%m%d"`
           SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
@@ -455,7 +464,7 @@ jobs:
       - name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
         shell: bash
-        if: github.event_name != 'schedule'
+        if: github.event_name == 'push'
         run: |
           VERSION=${{ github.ref_name }}
           echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV

Cargo.lock (generated)
View File

@@ -64,9 +64,9 @@ dependencies = [
[[package]] [[package]]
name = "aho-corasick" name = "aho-corasick"
version = "1.0.1" version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41"
dependencies = [ dependencies = [
"memchr", "memchr",
] ]
@@ -199,7 +199,7 @@ checksum = "8f1f8f5a6f3d50d89e3797d7593a50f96bb2aaa20ca0cc7be1fb673232c91d72"
[[package]] [[package]]
name = "api" name = "api"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"arrow-flight", "arrow-flight",
"common-base", "common-base",
@@ -831,9 +831,9 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
[[package]] [[package]]
name = "bcder" name = "bcder"
version = "0.7.1" version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69dfb7dc0d4aee3f8c723c43553b55662badf692b541ff8e4426df75dae8da9a" checksum = "ab26f019795af36086f2ca879aeeaae7566bdfd2fe0821a0328d3fdd9d1da2d9"
dependencies = [ dependencies = [
"bytes", "bytes",
"smallvec", "smallvec",
@@ -841,10 +841,10 @@ dependencies = [
[[package]] [[package]]
name = "benchmarks" name = "benchmarks"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"arrow", "arrow",
"clap 4.3.0", "clap 4.3.2",
"client", "client",
"indicatif", "indicatif",
"itertools", "itertools",
@@ -1224,7 +1224,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]] [[package]]
name = "catalog" name = "catalog"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"api", "api",
"arc-swap", "arc-swap",
@@ -1445,20 +1445,20 @@ dependencies = [
[[package]] [[package]]
name = "clap" name = "clap"
version = "4.3.0" version = "4.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93aae7a4192245f70fe75dd9157fc7b4a5bf53e88d30bd4396f7d8f9284d5acc" checksum = "401a4694d2bf92537b6867d94de48c4842089645fdcdf6c71865b175d836e9c2"
dependencies = [ dependencies = [
"clap_builder", "clap_builder",
"clap_derive 4.3.0", "clap_derive 4.3.2",
"once_cell", "once_cell",
] ]
[[package]] [[package]]
name = "clap_builder" name = "clap_builder"
version = "4.3.0" version = "4.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4f423e341edefb78c9caba2d9c7f7687d0e72e89df3ce3394554754393ac3990" checksum = "72394f3339a76daf211e57d4bcb374410f3965dcc606dd0e03738c7888766980"
dependencies = [ dependencies = [
"anstream", "anstream",
"anstyle", "anstyle",
@@ -1482,9 +1482,9 @@ dependencies = [
[[package]] [[package]]
name = "clap_derive" name = "clap_derive"
version = "4.3.0" version = "4.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "191d9573962933b4027f932c600cd252ce27a8ad5979418fe78e43c07996f27b" checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f"
dependencies = [ dependencies = [
"heck", "heck",
"proc-macro2", "proc-macro2",
@@ -1509,7 +1509,7 @@ checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b"
[[package]] [[package]]
name = "client" name = "client"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"api", "api",
"arrow-flight", "arrow-flight",
@@ -1534,7 +1534,7 @@ dependencies = [
"prost", "prost",
"rand", "rand",
"snafu", "snafu",
"substrait 0.2.0", "substrait 0.4.0",
"substrait 0.7.5", "substrait 0.7.5",
"tokio", "tokio",
"tokio-stream", "tokio-stream",
@@ -1571,7 +1571,7 @@ dependencies = [
[[package]] [[package]]
name = "cmd" name = "cmd"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"anymap", "anymap",
"build-data", "build-data",
@@ -1601,9 +1601,8 @@ dependencies = [
"servers", "servers",
"session", "session",
"snafu", "snafu",
"substrait 0.2.0", "substrait 0.4.0",
"temp-env", "temp-env",
"tikv-jemalloc-ctl",
"tikv-jemallocator", "tikv-jemallocator",
"tokio", "tokio",
"toml", "toml",
@@ -1634,7 +1633,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]] [[package]]
name = "common-base" name = "common-base"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"anymap", "anymap",
"bitvec", "bitvec",
@@ -1648,7 +1647,7 @@ dependencies = [
[[package]] [[package]]
name = "common-catalog" name = "common-catalog"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"chrono", "chrono",
@@ -1665,7 +1664,7 @@ dependencies = [
[[package]] [[package]]
name = "common-datasource" name = "common-datasource"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"arrow", "arrow",
"arrow-schema", "arrow-schema",
@@ -1690,7 +1689,7 @@ dependencies = [
[[package]] [[package]]
name = "common-error" name = "common-error"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"snafu", "snafu",
"strum", "strum",
@@ -1698,7 +1697,7 @@ dependencies = [
[[package]] [[package]]
name = "common-function" name = "common-function"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"arc-swap", "arc-swap",
"chrono-tz 0.6.3", "chrono-tz 0.6.3",
@@ -1721,7 +1720,7 @@ dependencies = [
[[package]] [[package]]
name = "common-function-macro" name = "common-function-macro"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"arc-swap", "arc-swap",
"backtrace", "backtrace",
@@ -1737,7 +1736,7 @@ dependencies = [
[[package]] [[package]]
name = "common-grpc" name = "common-grpc"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"api", "api",
"arrow-flight", "arrow-flight",
@@ -1767,7 +1766,7 @@ dependencies = [
[[package]] [[package]]
name = "common-grpc-expr" name = "common-grpc-expr"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"api", "api",
"async-trait", "async-trait",
@@ -1786,7 +1785,7 @@ dependencies = [
[[package]] [[package]]
name = "common-mem-prof" name = "common-mem-prof"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"common-error", "common-error",
"snafu", "snafu",
@@ -1799,7 +1798,7 @@ dependencies = [
[[package]] [[package]]
name = "common-meta" name = "common-meta"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"api", "api",
"chrono", "chrono",
@@ -1817,9 +1816,20 @@ dependencies = [
"tokio", "tokio",
] ]
[[package]]
name = "common-pprof"
version = "0.4.0"
dependencies = [
"common-error",
"pprof",
"prost",
"snafu",
"tokio",
]
[[package]] [[package]]
name = "common-procedure" name = "common-procedure"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"async-stream", "async-stream",
"async-trait", "async-trait",
@@ -1841,7 +1851,7 @@ dependencies = [
[[package]] [[package]]
name = "common-procedure-test" name = "common-procedure-test"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"common-procedure", "common-procedure",
@@ -1849,7 +1859,7 @@ dependencies = [
[[package]] [[package]]
name = "common-query" name = "common-query"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"api", "api",
"async-trait", "async-trait",
@@ -1869,7 +1879,7 @@ dependencies = [
[[package]] [[package]]
name = "common-recordbatch" name = "common-recordbatch"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"common-error", "common-error",
"datafusion", "datafusion",
@@ -1885,7 +1895,7 @@ dependencies = [
[[package]] [[package]]
name = "common-runtime" name = "common-runtime"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"common-error", "common-error",
@@ -1901,7 +1911,7 @@ dependencies = [
[[package]] [[package]]
name = "common-telemetry" name = "common-telemetry"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"backtrace", "backtrace",
"common-error", "common-error",
@@ -1926,7 +1936,7 @@ dependencies = [
[[package]] [[package]]
name = "common-test-util" name = "common-test-util"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"once_cell", "once_cell",
"rand", "rand",
@@ -1935,7 +1945,7 @@ dependencies = [
[[package]] [[package]]
name = "common-time" name = "common-time"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"chrono", "chrono",
"chrono-tz 0.8.2", "chrono-tz 0.8.2",
@@ -2079,6 +2089,15 @@ version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa"
[[package]]
name = "cpp_demangle"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c76f98bdfc7f66172e6c7065f981ebb576ffc903fe4c0561d9f0c2509226dc6"
dependencies = [
"cfg-if 1.0.0",
]
[[package]] [[package]]
name = "cpufeatures" name = "cpufeatures"
version = "0.2.7" version = "0.2.7"
@@ -2380,7 +2399,7 @@ dependencies = [
"hashbrown 0.12.3", "hashbrown 0.12.3",
"lock_api", "lock_api",
"once_cell", "once_cell",
"parking_lot_core 0.9.7", "parking_lot_core 0.9.8",
] ]
[[package]] [[package]]
@@ -2566,7 +2585,7 @@ dependencies = [
[[package]] [[package]]
name = "datanode" name = "datanode"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"api", "api",
"async-compat", "async-compat",
@@ -2622,7 +2641,7 @@ dependencies = [
"sql", "sql",
"storage", "storage",
"store-api", "store-api",
"substrait 0.2.0", "substrait 0.4.0",
"table", "table",
"table-procedure", "table-procedure",
"tokio", "tokio",
@@ -2636,7 +2655,7 @@ dependencies = [
[[package]] [[package]]
name = "datatypes" name = "datatypes"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"arrow", "arrow",
"arrow-array", "arrow-array",
@@ -2656,6 +2675,15 @@ dependencies = [
"snafu", "snafu",
] ]
[[package]]
name = "debugid"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d"
dependencies = [
"uuid",
]
[[package]] [[package]]
name = "der" name = "der"
version = "0.5.1" version = "0.5.1"
@@ -3062,7 +3090,7 @@ dependencies = [
[[package]] [[package]]
name = "file-table-engine" name = "file-table-engine"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"common-catalog", "common-catalog",
@@ -3100,6 +3128,18 @@ dependencies = [
"windows-sys 0.48.0", "windows-sys 0.48.0",
] ]
[[package]]
name = "findshlibs"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64"
dependencies = [
"cc",
"lazy_static",
"libc",
"winapi",
]
[[package]] [[package]]
name = "fixedbitset" name = "fixedbitset"
version = "0.4.2" version = "0.4.2"
@@ -3114,9 +3154,9 @@ checksum = "cda653ca797810c02f7ca4b804b40b8b95ae046eb989d356bce17919a8c25499"
[[package]] [[package]]
name = "flatbuffers" name = "flatbuffers"
version = "23.1.21" version = "23.5.26"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77f5399c2c9c50ae9418e522842ad362f61ee48b346ac106807bd355a8a7c619" checksum = "4dac53e22462d78c16d64a1cd22371b54cc3fe94aa15e7886a2fa6e5d1ab8640"
dependencies = [ dependencies = [
"bitflags 1.3.2", "bitflags 1.3.2",
"rustc_version 0.4.0", "rustc_version 0.4.0",
@@ -3141,9 +3181,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]] [[package]]
name = "form_urlencoded" name = "form_urlencoded"
version = "1.1.0" version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652"
dependencies = [ dependencies = [
"percent-encoding", "percent-encoding",
] ]
@@ -3159,7 +3199,7 @@ dependencies = [
[[package]] [[package]]
name = "frontend" name = "frontend"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"api", "api",
"async-compat", "async-compat",
@@ -3213,7 +3253,7 @@ dependencies = [
"storage", "storage",
"store-api", "store-api",
"strfmt", "strfmt",
"substrait 0.2.0", "substrait 0.4.0",
"table", "table",
"tokio", "tokio",
"toml", "toml",
@@ -4348,9 +4388,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]] [[package]]
name = "idna" name = "idna"
version = "0.3.0" version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c"
dependencies = [ dependencies = [
"unicode-bidi", "unicode-bidi",
"unicode-normalization", "unicode-normalization",
@@ -4385,9 +4425,9 @@ dependencies = [
[[package]] [[package]]
name = "indicatif" name = "indicatif"
version = "0.17.4" version = "0.17.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db45317f37ef454e6519b6c3ed7d377e5f23346f0823f86e65ca36912d1d0ef8" checksum = "8ff8cc23a7393a397ed1d7f56e6365cba772aba9f9912ab968b03043c395d057"
dependencies = [ dependencies = [
"console", "console",
"instant", "instant",
@@ -4402,6 +4442,24 @@ version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa799dd5ed20a7e349f3b4639aa80d74549c81716d9ec4f994c9b5815598306" checksum = "bfa799dd5ed20a7e349f3b4639aa80d74549c81716d9ec4f994c9b5815598306"
[[package]]
name = "inferno"
version = "0.11.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fb7c1b80a1dfa604bb4a649a5c5aeef3d913f7c520cb42b40e534e8a61bcdfc"
dependencies = [
"ahash 0.8.3",
"indexmap",
"is-terminal",
"itoa",
"log",
"num-format",
"once_cell",
"quick-xml 0.26.0",
"rgb",
"str_stack",
]
[[package]] [[package]]
name = "influxdb_line_protocol" name = "influxdb_line_protocol"
version = "0.1.0" version = "0.1.0"
@@ -4699,9 +4757,9 @@ dependencies = [
[[package]] [[package]]
name = "libc" name = "libc"
version = "0.2.144" version = "0.2.145"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" checksum = "fc86cde3ff845662b8f4ef6cb50ea0e20c524eb3d29ae048287e06a1b3fa6a81"
[[package]] [[package]]
name = "libgit2-sys" name = "libgit2-sys"
@@ -4785,9 +4843,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
[[package]] [[package]]
name = "lock_api" name = "lock_api"
version = "0.4.9" version = "0.4.10"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16"
dependencies = [ dependencies = [
"autocfg", "autocfg",
"scopeguard", "scopeguard",
@@ -4801,7 +4859,7 @@ checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de"
[[package]] [[package]]
name = "log-store" name = "log-store"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"arc-swap", "arc-swap",
"async-stream", "async-stream",
@@ -5063,7 +5121,7 @@ dependencies = [
[[package]] [[package]]
name = "meta-client" name = "meta-client"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"api", "api",
"async-trait", "async-trait",
@@ -5091,7 +5149,7 @@ dependencies = [
[[package]] [[package]]
name = "meta-srv" name = "meta-srv"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"anymap", "anymap",
"api", "api",
@@ -5283,7 +5341,7 @@ dependencies = [
[[package]] [[package]]
name = "mito" name = "mito"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"anymap", "anymap",
"arc-swap", "arc-swap",
@@ -5307,6 +5365,7 @@ dependencies = [
"futures", "futures",
"key-lock", "key-lock",
"log-store", "log-store",
"metrics",
"object-store", "object-store",
"serde", "serde",
"serde_json", "serde_json",
@@ -5643,6 +5702,16 @@ dependencies = [
"syn 1.0.109", "syn 1.0.109",
] ]
[[package]]
name = "num-format"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3"
dependencies = [
"arrayvec",
"itoa",
]
[[package]] [[package]]
name = "num-integer" name = "num-integer"
version = "0.1.45" version = "0.1.45"
@@ -5734,16 +5803,16 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"
[[package]] [[package]]
name = "object" name = "object"
version = "0.30.3" version = "0.30.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385"
dependencies = [ dependencies = [
"memchr", "memchr",
] ]
[[package]] [[package]]
name = "object-store" name = "object-store"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"async-trait", "async-trait",
@@ -5782,9 +5851,9 @@ dependencies = [
[[package]] [[package]]
name = "once_cell" name = "once_cell"
version = "1.17.2" version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9670a07f94779e00908f3e686eab508878ebb390ba6e604d3a284c00e8d0487b" checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
[[package]] [[package]]
name = "oorandom" name = "oorandom"
@@ -6025,7 +6094,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
dependencies = [ dependencies = [
"lock_api", "lock_api",
"parking_lot_core 0.9.7", "parking_lot_core 0.9.8",
] ]
[[package]] [[package]]
@@ -6044,18 +6113,18 @@ dependencies = [
[[package]] [[package]]
name = "parking_lot_core" name = "parking_lot_core"
version = "0.9.7" version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447"
dependencies = [ dependencies = [
"backtrace", "backtrace",
"cfg-if 1.0.0", "cfg-if 1.0.0",
"libc", "libc",
"petgraph", "petgraph",
"redox_syscall 0.2.16", "redox_syscall 0.3.5",
"smallvec", "smallvec",
"thread-id", "thread-id",
"windows-sys 0.45.0", "windows-targets 0.48.0",
] ]
[[package]] [[package]]
@@ -6116,7 +6185,7 @@ dependencies = [
[[package]] [[package]]
name = "partition" name = "partition"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"api", "api",
"async-trait", "async-trait",
@@ -6195,9 +6264,9 @@ dependencies = [
[[package]] [[package]]
name = "percent-encoding" name = "percent-encoding"
version = "2.2.0" version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"
[[package]] [[package]]
name = "pest" name = "pest"
@@ -6519,6 +6588,32 @@ dependencies = [
"postgres-protocol", "postgres-protocol",
] ]
[[package]]
name = "pprof"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "196ded5d4be535690899a4631cc9f18cdc41b7ebf24a79400f46f48e49a11059"
dependencies = [
"backtrace",
"cfg-if 1.0.0",
"findshlibs",
"inferno",
"libc",
"log",
"nix 0.26.2",
"once_cell",
"parking_lot 0.12.1",
"prost",
"prost-build",
"prost-derive",
"protobuf",
"sha2",
"smallvec",
"symbolic-demangle",
"tempfile",
"thiserror",
]
[[package]] [[package]]
name = "ppv-lite86" name = "ppv-lite86"
version = "0.2.17" version = "0.2.17"
@@ -6677,7 +6772,7 @@ dependencies = [
[[package]] [[package]]
name = "promql" name = "promql"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"async-recursion", "async-recursion",
"async-trait", "async-trait",
@@ -6927,7 +7022,7 @@ dependencies = [
[[package]] [[package]]
name = "query" name = "query"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"ahash 0.8.3", "ahash 0.8.3",
"approx_eq", "approx_eq",
@@ -6981,12 +7076,21 @@ dependencies = [
"stats-cli", "stats-cli",
"store-api", "store-api",
"streaming-stats", "streaming-stats",
"substrait 0.2.0", "substrait 0.4.0",
"table", "table",
"tokio", "tokio",
"tokio-stream", "tokio-stream",
] ]
[[package]]
name = "quick-xml"
version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f50b1c63b38611e7d4d7f68b82d3ad0cc71a2ad2e7f61fc10f1328d917c93cd"
dependencies = [
"memchr",
]
[[package]] [[package]]
name = "quick-xml" name = "quick-xml"
version = "0.27.1" version = "0.27.1"
@@ -7175,11 +7279,11 @@ dependencies = [
[[package]] [[package]]
name = "regex" name = "regex"
version = "1.8.3" version = "1.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81ca098a9821bd52d6b24fd8b10bd081f47d39c22778cafaa75a2857a62c6390" checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f"
dependencies = [ dependencies = [
"aho-corasick 1.0.1", "aho-corasick 1.0.2",
"memchr", "memchr",
"regex-syntax 0.7.2", "regex-syntax 0.7.2",
] ]
@@ -7342,6 +7446,15 @@ dependencies = [
"thiserror", "thiserror",
] ]
[[package]]
name = "rgb"
version = "0.8.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20ec2d3e3fc7a92ced357df9cebd5a10b6fb2aa1ee797bf7e9ce2f17dffc8f59"
dependencies = [
"bytemuck",
]
[[package]] [[package]]
name = "ring" name = "ring"
version = "0.16.20" version = "0.16.20"
@@ -8139,7 +8252,7 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]] [[package]]
name = "script" name = "script"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"arrow", "arrow",
"async-trait", "async-trait",
@@ -8394,7 +8507,7 @@ dependencies = [
[[package]] [[package]]
name = "servers" name = "servers"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"aide", "aide",
"api", "api",
@@ -8414,6 +8527,7 @@ dependencies = [
"common-grpc", "common-grpc",
"common-grpc-expr", "common-grpc-expr",
"common-mem-prof", "common-mem-prof",
"common-pprof",
"common-query", "common-query",
"common-recordbatch", "common-recordbatch",
"common-runtime", "common-runtime",
@@ -8462,6 +8576,7 @@ dependencies = [
"sql", "sql",
"strum", "strum",
"table", "table",
"tikv-jemalloc-ctl",
"tokio", "tokio",
"tokio-postgres", "tokio-postgres",
"tokio-postgres-rustls", "tokio-postgres-rustls",
@@ -8476,7 +8591,7 @@ dependencies = [
[[package]] [[package]]
name = "session" name = "session"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"arc-swap", "arc-swap",
"common-catalog", "common-catalog",
@@ -8751,7 +8866,7 @@ dependencies = [
[[package]] [[package]]
name = "sql" name = "sql"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"api", "api",
"common-base", "common-base",
@@ -8797,7 +8912,7 @@ dependencies = [
[[package]] [[package]]
name = "sqlness-runner" name = "sqlness-runner"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"client", "client",
@@ -8941,6 +9056,12 @@ dependencies = [
"optional", "optional",
] ]
[[package]]
name = "stable_deref_trait"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
[[package]] [[package]]
name = "static_assertions" name = "static_assertions"
version = "1.1.0" version = "1.1.0"
@@ -8972,7 +9093,7 @@ dependencies = [
[[package]] [[package]]
name = "storage" name = "storage"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"arc-swap", "arc-swap",
"arrow", "arrow",
@@ -9023,7 +9144,7 @@ dependencies = [
[[package]] [[package]]
name = "store-api" name = "store-api"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"async-stream", "async-stream",
"async-trait", "async-trait",
@@ -9048,6 +9169,12 @@ version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e08d8363704e6c71fc928674353e6b7c23dcea9d82d7012c8faf2a3a025f8d0" checksum = "9e08d8363704e6c71fc928674353e6b7c23dcea9d82d7012c8faf2a3a025f8d0"
[[package]]
name = "str_stack"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9091b6114800a5f2141aee1d1b9d6ca3592ac062dc5decb3764ec5895a47b4eb"
[[package]] [[package]]
name = "streaming-stats" name = "streaming-stats"
version = "0.2.3" version = "0.2.3"
@@ -9132,7 +9259,7 @@ dependencies = [
[[package]] [[package]]
name = "substrait" name = "substrait"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"async-recursion", "async-recursion",
"async-trait", "async-trait",
@@ -9204,6 +9331,29 @@ version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
[[package]]
name = "symbolic-common"
version = "10.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b55cdc318ede251d0957f07afe5fed912119b8c1bc5a7804151826db999e737"
dependencies = [
"debugid",
"memmap2",
"stable_deref_trait",
"uuid",
]
[[package]]
name = "symbolic-demangle"
version = "10.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "79be897be8a483a81fff6a3a4e195b4ac838ef73ca42d348b3f722da9902e489"
dependencies = [
"cpp_demangle",
"rustc-demangle",
"symbolic-common",
]
[[package]] [[package]]
name = "syn" name = "syn"
version = "1.0.109" version = "1.0.109"
@@ -9264,7 +9414,7 @@ dependencies = [
[[package]] [[package]]
name = "table" name = "table"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"anymap", "anymap",
"async-trait", "async-trait",
@@ -9300,7 +9450,7 @@ dependencies = [
[[package]] [[package]]
name = "table-procedure" name = "table-procedure"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"catalog", "catalog",
@@ -9393,7 +9543,7 @@ dependencies = [
[[package]] [[package]]
name = "tests-integration" name = "tests-integration"
version = "0.2.0" version = "0.4.0"
dependencies = [ dependencies = [
"api", "api",
"async-trait", "async-trait",
@@ -10493,9 +10643,9 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a"
[[package]] [[package]]
name = "url" name = "url"
version = "2.3.1" version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb"
dependencies = [ dependencies = [
"form_urlencoded", "form_urlencoded",
"idna", "idna",
@@ -11072,9 +11222,9 @@ dependencies = [
[[package]] [[package]]
name = "xml-rs" name = "xml-rs"
version = "0.8.13" version = "0.8.14"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d8f380ae16a37b30e6a2cf67040608071384b1450c189e61bea3ff57cde922d" checksum = "52839dc911083a8ef63efa4d039d1f58b5e409f923e44c80828f206f66e5541c"
[[package]] [[package]]
name = "xz2" name = "xz2"

View File

@@ -17,6 +17,7 @@ members = [
     "src/common/meta",
     "src/common/procedure",
     "src/common/procedure-test",
+    "src/common/pprof",
     "src/common/query",
     "src/common/recordbatch",
     "src/common/runtime",
@@ -49,7 +50,7 @@ members = [
 ]

 [workspace.package]
-version = "0.3.0"
+version = "0.4.0"
 edition = "2021"
 license = "Apache-2.0"
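The hunk above registers the new src/common/pprof workspace member that backs the cpu-profiling HTTP API from #1694. The commit message says the profiler guard follows pprof-rs's own example; a minimal sketch of that pattern (not the actual handler code from the PR, assuming the crate's flamegraph feature is enabled and a hypothetical helper name) looks like this:

use std::fs::File;

fn profile_cpu_for_a_while() -> Result<(), Box<dyn std::error::Error>> {
    // Sample the current process ~99 times per second while the guard is alive.
    let guard = pprof::ProfilerGuardBuilder::default()
        .frequency(99)
        .blocklist(&["libc", "libgcc", "pthread", "vdso"])
        .build()?;

    // ... run the workload to be profiled ...

    // Turn the collected samples into a report and render a flamegraph SVG.
    let report = guard.report().build()?;
    let file = File::create("flamegraph.svg")?;
    report.flamegraph(file)?;
    Ok(())
}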

View File

@@ -106,7 +106,7 @@ Please see [the online document site](https://docs.greptime.com/getting-started/
 Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview#connect) on our [official document site](https://docs.greptime.com/).

-To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients).
+To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/client/overview).

 ## Resources
@@ -123,7 +123,7 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c
 ### Documentation

-- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts.html)
+- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts/overview)
 - GreptimeDB [Developer
   Guide](https://docs.greptime.com/developer-guide/overview.html)
 - GreptimeDB [internal code document](https://greptimedb.rs)

View File

@@ -51,7 +51,7 @@ max_purge_tasks = 32
 # Create a checkpoint every <checkpoint_margin> actions.
 checkpoint_margin = 10
 # Region manifest logs and checkpoints gc execution duration
-gc_duration = '30s'
+gc_duration = '10m'
 # Whether to try creating a manifest checkpoint on region opening
 checkpoint_on_startup = false

View File

@@ -115,7 +115,7 @@ max_purge_tasks = 32
 # Create a checkpoint every <checkpoint_margin> actions.
 checkpoint_margin = 10
 # Region manifest logs and checkpoints gc execution duration
-gc_duration = '30s'
+gc_duration = '10m'
 # Whether to try creating a manifest checkpoint on region opening
 checkpoint_on_startup = false

View File

@@ -41,7 +41,7 @@ pub enum Error {
     ))]
     ConvertColumnDefaultConstraint {
         column: String,
-        #[snafu(backtrace)]
+        location: Location,
         source: datatypes::error::Error,
     },
@@ -52,7 +52,7 @@ pub enum Error {
     ))]
     InvalidColumnDefaultConstraint {
         column: String,
-        #[snafu(backtrace)]
+        location: Location,
         source: datatypes::error::Error,
     },
 }

View File

@@ -32,18 +32,18 @@ pub enum Error {
         source
     ))]
     CompileScriptInternal {
-        #[snafu(backtrace)]
+        location: Location,
         source: BoxedError,
     },
     #[snafu(display("Failed to open system catalog table, source: {}", source))]
     OpenSystemCatalog {
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },
     #[snafu(display("Failed to create system catalog table, source: {}", source))]
     CreateSystemCatalog {
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },
@@ -54,7 +54,7 @@ pub enum Error {
     ))]
     CreateTable {
         table_info: String,
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },
@@ -94,7 +94,7 @@ pub enum Error {
     #[snafu(display("Table engine not found: {}, source: {}", engine_name, source))]
     TableEngineNotFound {
         engine_name: String,
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },
@@ -132,7 +132,7 @@ pub enum Error {
     #[snafu(display("Failed to open table, table info: {}, source: {}", table_info, source))]
     OpenTable {
         table_info: String,
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },
@@ -147,13 +147,13 @@ pub enum Error {
     #[snafu(display("Failed to read system catalog table records"))]
     ReadSystemCatalog {
-        #[snafu(backtrace)]
+        location: Location,
         source: common_recordbatch::error::Error,
     },
     #[snafu(display("Failed to create recordbatch, source: {}", source))]
     CreateRecordBatch {
-        #[snafu(backtrace)]
+        location: Location,
         source: common_recordbatch::error::Error,
     },
@@ -162,7 +162,7 @@ pub enum Error {
         source
     ))]
     InsertCatalogRecord {
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },
@@ -173,7 +173,7 @@ pub enum Error {
     ))]
     DeregisterTable {
         request: DeregisterTableRequest,
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },
@@ -182,36 +182,36 @@ pub enum Error {
     #[snafu(display("Failed to scan system catalog table, source: {}", source))]
     SystemCatalogTableScan {
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },
     #[snafu(display("{source}"))]
     Internal {
-        #[snafu(backtrace)]
+        location: Location,
         source: BoxedError,
     },
     #[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
     SystemCatalogTableScanExec {
-        #[snafu(backtrace)]
+        location: Location,
         source: common_query::error::Error,
     },
     #[snafu(display("Cannot parse catalog value, source: {}", source))]
     InvalidCatalogValue {
-        #[snafu(backtrace)]
+        location: Location,
         source: common_catalog::error::Error,
     },
     #[snafu(display("Failed to perform metasrv operation, source: {}", source))]
     MetaSrv {
-        #[snafu(backtrace)]
+        location: Location,
         source: meta_client::error::Error,
     },
     #[snafu(display("Invalid table info in catalog, source: {}", source))]
     InvalidTableInfoInCatalog {
-        #[snafu(backtrace)]
+        location: Location,
         source: datatypes::error::Error,
     },
@@ -230,7 +230,7 @@ pub enum Error {
     #[snafu(display("Table schema mismatch, source: {}", source))]
     TableSchemaMismatch {
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },
@@ -258,7 +258,7 @@ impl ErrorExt for Error {
             Error::Generic { .. } | Error::SystemCatalogTypeMismatch { .. } => StatusCode::Internal,
-            Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source } => {
+            Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source, .. } => {
                 source.status_code()
             }
             Error::InvalidCatalogValue { source, .. } => source.status_code(),
@@ -275,14 +275,14 @@ impl ErrorExt for Error {
             | Error::OpenTable { source, .. }
             | Error::CreateTable { source, .. }
             | Error::DeregisterTable { source, .. }
-            | Error::TableSchemaMismatch { source } => source.status_code(),
+            | Error::TableSchemaMismatch { source, .. } => source.status_code(),
             Error::MetaSrv { source, .. } => source.status_code(),
-            Error::SystemCatalogTableScan { source } => source.status_code(),
+            Error::SystemCatalogTableScan { source, .. } => source.status_code(),
-            Error::SystemCatalogTableScanExec { source } => source.status_code(),
+            Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
-            Error::InvalidTableInfoInCatalog { source } => source.status_code(),
+            Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
-            Error::CompileScriptInternal { source } | Error::Internal { source } => {
+            Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
                 source.status_code()
             }
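The error.rs hunks above (and the client error.rs hunks further down) all apply the same mechanical change from #1753: the #[snafu(backtrace)] attribute on the source field is replaced by a location: Location field, and the status_code match arms gain `..` so the extra field is ignored. A generic sketch of the pattern, assuming snafu's own Location type rather than anything GreptimeDB-specific, and with an illustrative std::io::Error as the source:

use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
pub enum Error {
    // Before: `#[snafu(backtrace)]` delegated backtrace capture to `source`.
    // After: snafu fills in `location` with the spot where the error was built.
    #[snafu(display("Failed to open table, source: {}", source))]
    OpenTable {
        location: Location,
        source: std::io::Error,
    },
}

fn status_code(err: &Error) -> &'static str {
    match err {
        // `..` skips the new `location` field, mirroring the `{ source, .. }`
        // rewrites in the match arms above.
        Error::OpenTable { source: _, .. } => "internal",
    }
}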

View File

@@ -17,7 +17,7 @@ use std::fmt::Debug;
 use std::pin::Pin;
 use std::sync::Arc;

-pub use client::CachedMetaKvBackend;
+pub use client::{CachedMetaKvBackend, MetaKvBackend};
 use futures::Stream;
 use futures_util::StreamExt;
 pub use manager::{RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider};

View File

@@ -31,7 +31,6 @@ pub struct DatanodeClients {
 impl Default for DatanodeClients {
     fn default() -> Self {
-        // TODO(LFC): Make this channel config configurable.
         let config = ChannelConfig::new().timeout(Duration::from_secs(8));
         Self {

View File

@@ -254,7 +254,6 @@ impl Database {
         let mut client = self.client.make_flight_client()?;
-        // TODO(LFC): Streaming get flight data.
         let flight_data: Vec<FlightData> = client
             .mut_inner()
             .do_get(request)

View File

@@ -34,13 +34,13 @@ pub enum Error {
     #[snafu(display("Failed to convert FlightData, source: {}", source))]
     ConvertFlightData {
-        #[snafu(backtrace)]
+        location: Location,
         source: common_grpc::Error,
     },
     #[snafu(display("Column datatype error, source: {}", source))]
     ColumnDataType {
-        #[snafu(backtrace)]
+        location: Location,
         source: api::error::Error,
     },
@@ -57,7 +57,7 @@ pub enum Error {
     ))]
     CreateChannel {
         addr: String,
-        #[snafu(backtrace)]
+        location: Location,
         source: common_grpc::error::Error,
     },
@@ -85,7 +85,7 @@ impl ErrorExt for Error {
             Error::Server { code, .. } => *code,
             Error::FlightGet { source, .. } => source.status_code(),
-            Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
+            Error::CreateChannel { source, .. } | Error::ConvertFlightData { source, .. } => {
                 source.status_code()
             }
             Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,

View File

@@ -10,7 +10,6 @@ name = "greptime"
 path = "src/bin/greptime.rs"

 [features]
-mem-prof = ["tikv-jemallocator", "tikv-jemalloc-ctl"]
 tokio-console = ["common-telemetry/tokio-console"]

 [dependencies]
@@ -42,8 +41,7 @@ servers = { path = "../servers" }
 session = { path = "../session" }
 snafu.workspace = true
 substrait = { path = "../common/substrait" }
-tikv-jemalloc-ctl = { version = "0.5", optional = true }
-tikv-jemallocator = { version = "0.5", optional = true }
+tikv-jemallocator = "0.5"
 tokio.workspace = true

 [dev-dependencies]


@@ -180,15 +180,19 @@ fn full_version() -> &'static str {
) )
} }
#[cfg(feature = "mem-prof")] fn log_env_flags() {
info!("command line arguments");
for argument in std::env::args() {
info!("argument: {}", argument);
}
}
#[global_allocator] #[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
#[tokio::main] #[tokio::main]
async fn main() -> Result<()> { async fn main() -> Result<()> {
let cmd = Command::parse(); let cmd = Command::parse();
// TODO(dennis):
// 1. adds ip/port to app
let app_name = &cmd.subcmd.to_string(); let app_name = &cmd.subcmd.to_string();
let opts = cmd.load_options()?; let opts = cmd.load_options()?;
@@ -205,6 +209,14 @@ async fn main() -> Result<()> {
// Report app version as gauge. // Report app version as gauge.
gauge!("app_version", 1.0, "short_version" => short_version(), "version" => full_version()); gauge!("app_version", 1.0, "short_version" => short_version(), "version" => full_version());
// Log version and argument flags.
info!(
"short_version: {}, full_version: {}",
short_version(),
full_version()
);
log_env_flags();
let mut app = cmd.build(opts).await?; let mut app = cmd.build(opts).await?;
tokio::select! { tokio::select! {


@@ -84,8 +84,6 @@ struct StartCommand {
rpc_addr: Option<String>, rpc_addr: Option<String>,
#[clap(long)] #[clap(long)]
rpc_hostname: Option<String>, rpc_hostname: Option<String>,
#[clap(long)]
mysql_addr: Option<String>,
#[clap(long, multiple = true, value_delimiter = ',')] #[clap(long, multiple = true, value_delimiter = ',')]
metasrv_addr: Option<Vec<String>>, metasrv_addr: Option<Vec<String>>,
#[clap(short, long)] #[clap(short, long)]
@@ -126,10 +124,6 @@ impl StartCommand {
opts.rpc_hostname = self.rpc_hostname.clone(); opts.rpc_hostname = self.rpc_hostname.clone();
} }
if let Some(addr) = &self.mysql_addr {
opts.mysql_addr = addr.clone();
}
if let Some(node_id) = self.node_id { if let Some(node_id) = self.node_id {
opts.node_id = Some(node_id); opts.node_id = Some(node_id);
} }
@@ -205,8 +199,6 @@ mod tests {
rpc_addr = "127.0.0.1:3001" rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1" rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8 rpc_runtime_size = 8
mysql_addr = "127.0.0.1:4406"
mysql_runtime_size = 2
[meta_client_options] [meta_client_options]
metasrv_addrs = ["127.0.0.1:3002"] metasrv_addrs = ["127.0.0.1:3002"]
@@ -252,8 +244,6 @@ mod tests {
cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() }; cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr); assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
assert_eq!(2, options.mysql_runtime_size);
assert_eq!(Some(42), options.node_id); assert_eq!(Some(42), options.node_id);
assert_eq!("/other/wal", options.wal.dir.unwrap()); assert_eq!("/other/wal", options.wal.dir.unwrap());
@@ -368,8 +358,6 @@ mod tests {
rpc_addr = "127.0.0.1:3001" rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1" rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8 rpc_runtime_size = 8
mysql_addr = "127.0.0.1:4406"
mysql_runtime_size = 2
[meta_client_options] [meta_client_options]
timeout_millis = 3000 timeout_millis = 3000


@@ -24,43 +24,43 @@ use snafu::Location;
pub enum Error { pub enum Error {
#[snafu(display("Failed to start datanode, source: {}", source))] #[snafu(display("Failed to start datanode, source: {}", source))]
StartDatanode { StartDatanode {
#[snafu(backtrace)] location: Location,
source: datanode::error::Error, source: datanode::error::Error,
}, },
#[snafu(display("Failed to shutdown datanode, source: {}", source))] #[snafu(display("Failed to shutdown datanode, source: {}", source))]
ShutdownDatanode { ShutdownDatanode {
#[snafu(backtrace)] location: Location,
source: datanode::error::Error, source: datanode::error::Error,
}, },
#[snafu(display("Failed to start frontend, source: {}", source))] #[snafu(display("Failed to start frontend, source: {}", source))]
StartFrontend { StartFrontend {
#[snafu(backtrace)] location: Location,
source: frontend::error::Error, source: frontend::error::Error,
}, },
#[snafu(display("Failed to shutdown frontend, source: {}", source))] #[snafu(display("Failed to shutdown frontend, source: {}", source))]
ShutdownFrontend { ShutdownFrontend {
#[snafu(backtrace)] location: Location,
source: frontend::error::Error, source: frontend::error::Error,
}, },
#[snafu(display("Failed to build meta server, source: {}", source))] #[snafu(display("Failed to build meta server, source: {}", source))]
BuildMetaServer { BuildMetaServer {
#[snafu(backtrace)] location: Location,
source: meta_srv::error::Error, source: meta_srv::error::Error,
}, },
#[snafu(display("Failed to start meta server, source: {}", source))] #[snafu(display("Failed to start meta server, source: {}", source))]
StartMetaServer { StartMetaServer {
#[snafu(backtrace)] location: Location,
source: meta_srv::error::Error, source: meta_srv::error::Error,
}, },
#[snafu(display("Failed to shutdown meta server, source: {}", source))] #[snafu(display("Failed to shutdown meta server, source: {}", source))]
ShutdownMetaServer { ShutdownMetaServer {
#[snafu(backtrace)] location: Location,
source: meta_srv::error::Error, source: meta_srv::error::Error,
}, },
@@ -72,14 +72,14 @@ pub enum Error {
#[snafu(display("Illegal auth config: {}", source))] #[snafu(display("Illegal auth config: {}", source))]
IllegalAuthConfig { IllegalAuthConfig {
#[snafu(backtrace)] location: Location,
source: servers::auth::Error, source: servers::auth::Error,
}, },
#[snafu(display("Unsupported selector type, {} source: {}", selector_type, source))] #[snafu(display("Unsupported selector type, {} source: {}", selector_type, source))]
UnsupportedSelectorType { UnsupportedSelectorType {
selector_type: String, selector_type: String,
#[snafu(backtrace)] location: Location,
source: meta_srv::error::Error, source: meta_srv::error::Error,
}, },
@@ -101,44 +101,44 @@ pub enum Error {
#[snafu(display("Failed to request database, sql: {sql}, source: {source}"))] #[snafu(display("Failed to request database, sql: {sql}, source: {source}"))]
RequestDatabase { RequestDatabase {
sql: String, sql: String,
#[snafu(backtrace)] location: Location,
source: client::Error, source: client::Error,
}, },
#[snafu(display("Failed to collect RecordBatches, source: {source}"))] #[snafu(display("Failed to collect RecordBatches, source: {source}"))]
CollectRecordBatches { CollectRecordBatches {
#[snafu(backtrace)] location: Location,
source: common_recordbatch::error::Error, source: common_recordbatch::error::Error,
}, },
#[snafu(display("Failed to pretty print Recordbatches, source: {source}"))] #[snafu(display("Failed to pretty print Recordbatches, source: {source}"))]
PrettyPrintRecordBatches { PrettyPrintRecordBatches {
#[snafu(backtrace)] location: Location,
source: common_recordbatch::error::Error, source: common_recordbatch::error::Error,
}, },
#[snafu(display("Failed to start Meta client, source: {}", source))] #[snafu(display("Failed to start Meta client, source: {}", source))]
StartMetaClient { StartMetaClient {
#[snafu(backtrace)] location: Location,
source: meta_client::error::Error, source: meta_client::error::Error,
}, },
#[snafu(display("Failed to parse SQL: {}, source: {}", sql, source))] #[snafu(display("Failed to parse SQL: {}, source: {}", sql, source))]
ParseSql { ParseSql {
sql: String, sql: String,
#[snafu(backtrace)] location: Location,
source: query::error::Error, source: query::error::Error,
}, },
#[snafu(display("Failed to plan statement, source: {}", source))] #[snafu(display("Failed to plan statement, source: {}", source))]
PlanStatement { PlanStatement {
#[snafu(backtrace)] location: Location,
source: query::error::Error, source: query::error::Error,
}, },
#[snafu(display("Failed to encode logical plan in substrait, source: {}", source))] #[snafu(display("Failed to encode logical plan in substrait, source: {}", source))]
SubstraitEncodeLogicalPlan { SubstraitEncodeLogicalPlan {
#[snafu(backtrace)] location: Location,
source: substrait::error::Error, source: substrait::error::Error,
}, },
@@ -150,7 +150,7 @@ pub enum Error {
#[snafu(display("Failed to start catalog manager, source: {}", source))] #[snafu(display("Failed to start catalog manager, source: {}", source))]
StartCatalogManager { StartCatalogManager {
#[snafu(backtrace)] location: Location,
source: catalog::error::Error, source: catalog::error::Error,
}, },
} }
@@ -160,13 +160,13 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error { impl ErrorExt for Error {
fn status_code(&self) -> StatusCode { fn status_code(&self) -> StatusCode {
match self { match self {
Error::StartDatanode { source } => source.status_code(), Error::StartDatanode { source, .. } => source.status_code(),
Error::StartFrontend { source } => source.status_code(), Error::StartFrontend { source, .. } => source.status_code(),
Error::ShutdownDatanode { source } => source.status_code(), Error::ShutdownDatanode { source, .. } => source.status_code(),
Error::ShutdownFrontend { source } => source.status_code(), Error::ShutdownFrontend { source, .. } => source.status_code(),
Error::StartMetaServer { source } => source.status_code(), Error::StartMetaServer { source, .. } => source.status_code(),
Error::ShutdownMetaServer { source } => source.status_code(), Error::ShutdownMetaServer { source, .. } => source.status_code(),
Error::BuildMetaServer { source } => source.status_code(), Error::BuildMetaServer { source, .. } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(), Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::MissingConfig { .. } Error::MissingConfig { .. }
| Error::LoadLayeredConfig { .. } | Error::LoadLayeredConfig { .. }
@@ -175,15 +175,14 @@ impl ErrorExt for Error {
| Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments, | Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal, Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
Error::RequestDatabase { source, .. } => source.status_code(), Error::RequestDatabase { source, .. } => source.status_code(),
Error::CollectRecordBatches { source } | Error::PrettyPrintRecordBatches { source } => { Error::CollectRecordBatches { source, .. }
| Error::PrettyPrintRecordBatches { source, .. } => source.status_code(),
Error::StartMetaClient { source, .. } => source.status_code(),
Error::ParseSql { source, .. } | Error::PlanStatement { source, .. } => {
source.status_code() source.status_code()
} }
Error::StartMetaClient { source } => source.status_code(), Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
Error::ParseSql { source, .. } | Error::PlanStatement { source } => { Error::StartCatalogManager { source, .. } => source.status_code(),
source.status_code()
}
Error::SubstraitEncodeLogicalPlan { source } => source.status_code(),
Error::StartCatalogManager { source } => source.status_code(),
} }
} }


@@ -16,13 +16,16 @@ use std::fmt;
use std::str::FromStr; use std::str::FromStr;
use std::sync::Arc; use std::sync::Arc;
use common_query::error::{self, Result, UnsupportedInputDataTypeSnafu}; use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
use common_query::prelude::{Signature, Volatility}; use common_query::prelude::{Signature, Volatility};
use common_time::timestamp::TimeUnit; use common_time::timestamp::TimeUnit;
use common_time::Timestamp; use common_time::Timestamp;
use datatypes::prelude::ConcreteDataType; use datatypes::prelude::ConcreteDataType;
use datatypes::types::StringType; use datatypes::types::TimestampType;
use datatypes::vectors::{Int64Vector, StringVector, Vector, VectorRef}; use datatypes::vectors::{
Int64Vector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
TimestampNanosecondVector, TimestampSecondVector, Vector, VectorRef,
};
use snafu::ensure; use snafu::ensure;
use crate::scalars::function::{Function, FunctionContext}; use crate::scalars::function::{Function, FunctionContext};
@@ -42,18 +45,33 @@ fn convert_to_seconds(arg: &str) -> Option<i64> {
} }
} }
fn process_vector(vector: &dyn Vector) -> Vec<Option<i64>> {
(0..vector.len())
.map(|i| paste::expr!((vector.get(i)).as_timestamp().map(|ts| ts.value())))
.collect::<Vec<Option<i64>>>()
}
impl Function for ToUnixtimeFunction { impl Function for ToUnixtimeFunction {
fn name(&self) -> &str { fn name(&self) -> &str {
NAME NAME
} }
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> { fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::timestamp_second_datatype()) Ok(ConcreteDataType::int64_datatype())
} }
fn signature(&self) -> Signature { fn signature(&self) -> Signature {
Signature::exact( Signature::uniform(
vec![ConcreteDataType::String(StringType)], 1,
vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
],
Volatility::Immutable, Volatility::Immutable,
) )
} }
@@ -61,7 +79,7 @@ impl Function for ToUnixtimeFunction {
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> { fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!( ensure!(
columns.len() == 1, columns.len() == 1,
error::InvalidFuncArgsSnafu { InvalidFuncArgsSnafu {
err_msg: format!( err_msg: format!(
"The length of the args is not correct, expect exactly one, have: {}", "The length of the args is not correct, expect exactly one, have: {}",
columns.len() columns.len()
@@ -79,6 +97,42 @@ impl Function for ToUnixtimeFunction {
.collect::<Vec<_>>(), .collect::<Vec<_>>(),
))) )))
} }
ConcreteDataType::Int64(_) | ConcreteDataType::Int32(_) => {
let array = columns[0].to_arrow_array();
Ok(Arc::new(Int64Vector::try_from_arrow_array(&array).unwrap()))
}
ConcreteDataType::Timestamp(ts) => {
let array = columns[0].to_arrow_array();
let value = match ts {
TimestampType::Second(_) => {
let vector = paste::expr!(TimestampSecondVector::try_from_arrow_array(
array
)
.unwrap());
process_vector(&vector)
}
TimestampType::Millisecond(_) => {
let vector = paste::expr!(
TimestampMillisecondVector::try_from_arrow_array(array).unwrap()
);
process_vector(&vector)
}
TimestampType::Microsecond(_) => {
let vector = paste::expr!(
TimestampMicrosecondVector::try_from_arrow_array(array).unwrap()
);
process_vector(&vector)
}
TimestampType::Nanosecond(_) => {
let vector = paste::expr!(TimestampNanosecondVector::try_from_arrow_array(
array
)
.unwrap());
process_vector(&vector)
}
};
Ok(Arc::new(Int64Vector::from(value)))
}
_ => UnsupportedInputDataTypeSnafu { _ => UnsupportedInputDataTypeSnafu {
function: NAME, function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(), datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
@@ -97,28 +151,37 @@ impl fmt::Display for ToUnixtimeFunction {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use common_query::prelude::TypeSignature; use common_query::prelude::TypeSignature;
use datatypes::prelude::ConcreteDataType; use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder};
use datatypes::types::StringType; use datatypes::scalars::ScalarVector;
use datatypes::timestamp::TimestampSecond;
use datatypes::value::Value; use datatypes::value::Value;
use datatypes::vectors::StringVector; use datatypes::vectors::{StringVector, TimestampSecondVector};
use super::{ToUnixtimeFunction, *}; use super::{ToUnixtimeFunction, *};
use crate::scalars::Function; use crate::scalars::Function;
#[test] #[test]
fn test_to_unixtime() { fn test_string_to_unixtime() {
let f = ToUnixtimeFunction::default(); let f = ToUnixtimeFunction::default();
assert_eq!("to_unixtime", f.name()); assert_eq!("to_unixtime", f.name());
assert_eq!( assert_eq!(
ConcreteDataType::timestamp_second_datatype(), ConcreteDataType::int64_datatype(),
f.return_type(&[]).unwrap() f.return_type(&[]).unwrap()
); );
assert!(matches!(f.signature(), assert!(matches!(f.signature(),
Signature { Signature {
type_signature: TypeSignature::Exact(valid_types), type_signature: TypeSignature::Uniform(1, valid_types),
volatility: Volatility::Immutable volatility: Volatility::Immutable
} if valid_types == vec![ConcreteDataType::String(StringType)] } if valid_types == vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
]
)); ));
let times = vec![ let times = vec![
@@ -145,4 +208,106 @@ mod tests {
} }
} }
} }
#[test]
fn test_int_to_unixtime() {
let f = ToUnixtimeFunction::default();
assert_eq!("to_unixtime", f.name());
assert_eq!(
ConcreteDataType::int64_datatype(),
f.return_type(&[]).unwrap()
);
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::Uniform(1, valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
]
));
let times = vec![Some(3_i64), None, Some(5_i64), None];
let results = vec![Some(3), None, Some(5), None];
let args: Vec<VectorRef> = vec![Arc::new(Int64Vector::from(times.clone()))];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in times.iter().enumerate() {
let v = vector.get(i);
if i == 1 || i == 3 {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Int64(ts) => {
assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
}
_ => unreachable!(),
}
}
}
#[test]
fn test_timestamp_to_unixtime() {
let f = ToUnixtimeFunction::default();
assert_eq!("to_unixtime", f.name());
assert_eq!(
ConcreteDataType::int64_datatype(),
f.return_type(&[]).unwrap()
);
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::Uniform(1, valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
]
));
let times: Vec<Option<TimestampSecond>> = vec![
Some(TimestampSecond::new(123)),
None,
Some(TimestampSecond::new(42)),
None,
];
let results = vec![Some(123), None, Some(42), None];
let ts_vector: TimestampSecondVector = build_vector_from_slice(&times);
let args: Vec<VectorRef> = vec![Arc::new(ts_vector)];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in times.iter().enumerate() {
let v = vector.get(i);
if i == 1 || i == 3 {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Int64(ts) => {
assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
}
_ => unreachable!(),
}
}
}
fn build_vector_from_slice<T: ScalarVector>(items: &[Option<T::RefItem<'_>>]) -> T {
let mut builder = T::Builder::with_capacity(items.len());
for item in items {
builder.push(*item);
}
builder.finish()
}
} }
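In the timestamp branch of `eval` above, the input column is downcast to the concrete vector type for its time unit and each element's raw value (in that vector's own unit) is collected into an `Int64Vector`. A rough sketch of the same downcast-and-collect idea, using the `arrow` crate's typed arrays directly rather than GreptimeDB's vector wrappers:

```rust
use arrow::array::{Array, TimestampMillisecondArray};

// Pull the raw i64 values out of a typed timestamp array, preserving nulls.
fn collect_raw_values(array: &TimestampMillisecondArray) -> Vec<Option<i64>> {
    (0..array.len())
        .map(|i| (!array.is_null(i)).then(|| array.value(i)))
        .collect()
}

fn main() {
    let array = TimestampMillisecondArray::from(vec![Some(1_000), None, Some(2_000)]);
    assert_eq!(collect_raw_values(&array), vec![Some(1_000), None, Some(2_000)]);
}
```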


@@ -32,7 +32,7 @@ pub enum Error {
#[snafu(display("Column datatype error, source: {}", source))] #[snafu(display("Column datatype error, source: {}", source))]
ColumnDataType { ColumnDataType {
#[snafu(backtrace)] location: Location,
source: api::error::Error, source: api::error::Error,
}, },
@@ -54,7 +54,7 @@ pub enum Error {
InvalidColumnProto { err_msg: String, location: Location }, InvalidColumnProto { err_msg: String, location: Location },
#[snafu(display("Failed to create vector, source: {}", source))] #[snafu(display("Failed to create vector, source: {}", source))]
CreateVector { CreateVector {
#[snafu(backtrace)] location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
@@ -68,13 +68,13 @@ pub enum Error {
))] ))]
InvalidColumnDef { InvalidColumnDef {
column: String, column: String,
#[snafu(backtrace)] location: Location,
source: api::error::Error, source: api::error::Error,
}, },
#[snafu(display("Unrecognized table option: {}", source))] #[snafu(display("Unrecognized table option: {}", source))]
UnrecognizedTableOption { UnrecognizedTableOption {
#[snafu(backtrace)] location: Location,
source: table::error::Error, source: table::error::Error,
}, },


@@ -53,7 +53,7 @@ pub enum Error {
#[snafu(display("Failed to create RecordBatch, source: {}", source))] #[snafu(display("Failed to create RecordBatch, source: {}", source))]
CreateRecordBatch { CreateRecordBatch {
#[snafu(backtrace)] location: Location,
source: common_recordbatch::error::Error, source: common_recordbatch::error::Error,
}, },
@@ -71,7 +71,7 @@ pub enum Error {
#[snafu(display("Failed to convert Arrow Schema, source: {}", source))] #[snafu(display("Failed to convert Arrow Schema, source: {}", source))]
ConvertArrowSchema { ConvertArrowSchema {
#[snafu(backtrace)] location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
} }
@@ -88,8 +88,8 @@ impl ErrorExt for Error {
| Error::Conversion { .. } | Error::Conversion { .. }
| Error::DecodeFlightData { .. } => StatusCode::Internal, | Error::DecodeFlightData { .. } => StatusCode::Internal,
Error::CreateRecordBatch { source } => source.status_code(), Error::CreateRecordBatch { source, .. } => source.status_code(),
Error::ConvertArrowSchema { source } => source.status_code(), Error::ConvertArrowSchema { source, .. } => source.status_code(),
} }
} }


@@ -23,7 +23,7 @@ pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Snafu)] #[derive(Debug, Snafu)]
#[snafu(visibility(pub))] #[snafu(visibility(pub))]
pub enum Error { pub enum Error {
#[snafu(display("Failed to read OPT_PROF"))] #[snafu(display("Failed to read OPT_PROF, source: {}", source))]
ReadOptProf { source: tikv_jemalloc_ctl::Error }, ReadOptProf { source: tikv_jemalloc_ctl::Error },
#[snafu(display("Memory profiling is not enabled"))] #[snafu(display("Memory profiling is not enabled"))]
@@ -32,13 +32,17 @@ pub enum Error {
#[snafu(display("Failed to build temp file from given path: {:?}", path))] #[snafu(display("Failed to build temp file from given path: {:?}", path))]
BuildTempPath { path: PathBuf, location: Location }, BuildTempPath { path: PathBuf, location: Location },
#[snafu(display("Failed to open temp file: {}", path))] #[snafu(display("Failed to open temp file: {}, source: {}", path, source))]
OpenTempFile { OpenTempFile {
path: String, path: String,
source: std::io::Error, source: std::io::Error,
}, },
#[snafu(display("Failed to dump profiling data to temp file: {:?}", path))] #[snafu(display(
"Failed to dump profiling data to temp file: {:?}, source: {}",
path,
source
))]
DumpProfileData { DumpProfileData {
path: PathBuf, path: PathBuf,
source: tikv_jemalloc_ctl::Error, source: tikv_jemalloc_ctl::Error,


@@ -0,0 +1,16 @@
[package]
name = "common-pprof"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
common-error = { path = "../error" }
pprof = { version = "0.11", features = [
"flamegraph",
"prost-codec",
"protobuf",
] }
prost.workspace = true
snafu.workspace = true
tokio.workspace = true


@@ -0,0 +1,28 @@
# Profiling CPU
## Build GreptimeDB with `pprof` feature
```bash
cargo build --features=pprof
```
## HTTP API
Sample at 99 Hertz, for 5 seconds, output report in [protobuf format](https://github.com/google/pprof/blob/master/proto/profile.proto).
```bash
curl -s '0:4000/v1/prof/cpu' > /tmp/pprof.out
```
Then you can use the `pprof` command with the protobuf file.
```bash
go tool pprof -top /tmp/pprof.out
```
Sample at 99 Hertz, for 60 seconds, output report in flamegraph format.
```bash
curl -s '0:4000/v1/prof/cpu?seconds=60&output=flamegraph' > /tmp/pprof.svg
```
Sample at 49 Hertz, for 10 seconds, output report in text format.
```bash
curl -s '0:4000/v1/prof/cpu?seconds=10&frequency=49&output=text' > /tmp/pprof.txt
```

src/common/pprof/src/lib.rs (new file, 124 lines)

@@ -0,0 +1,124 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::time::Duration;
use common_error::prelude::{ErrorExt, StatusCode};
use prost::Message;
use snafu::{Location, ResultExt, Snafu};
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display(
"Failed to create profiler guard, source: {}, location: {}",
source,
location
))]
CreateGuard {
source: pprof::Error,
location: Location,
},
#[snafu(display("Failed to create report, source: {}, location: {}", source, location))]
CreateReport {
source: pprof::Error,
location: Location,
},
#[snafu(display(
"Failed to create flamegraph, source: {}, location: {}",
source,
location
))]
CreateFlamegraph {
source: pprof::Error,
location: Location,
},
#[snafu(display(
"Failed to create pprof report, source: {}, location: {}",
source,
location
))]
ReportPprof {
source: pprof::Error,
location: Location,
},
}
pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
StatusCode::Unexpected
}
fn as_any(&self) -> &dyn Any {
self
}
}
/// CPU profiler utility.
// Inspired by https://github.com/datafuselabs/databend/blob/67f445e83cd4eceda98f6c1c114858929d564029/src/common/base/src/base/profiling.rs
#[derive(Debug)]
pub struct Profiling {
/// Sample duration.
duration: Duration,
/// Sample frequency.
frequency: i32,
}
impl Profiling {
/// Creates a new profiler.
pub fn new(duration: Duration, frequency: i32) -> Profiling {
Profiling {
duration,
frequency,
}
}
/// Profiles and returns a generated pprof report.
pub async fn report(&self) -> Result<pprof::Report> {
let guard = pprof::ProfilerGuardBuilder::default()
.frequency(self.frequency)
.blocklist(&["libc", "libgcc", "pthread", "vdso"])
.build()
.context(CreateGuardSnafu)?;
tokio::time::sleep(self.duration).await;
guard.report().build().context(CreateReportSnafu)
}
/// Profiles and returns a generated flamegraph.
pub async fn dump_flamegraph(&self) -> Result<Vec<u8>> {
let mut body: Vec<u8> = Vec::new();
let report = self.report().await?;
report
.flamegraph(&mut body)
.context(CreateFlamegraphSnafu)?;
Ok(body)
}
/// Profiles and returns a generated proto.
pub async fn dump_proto(&self) -> Result<Vec<u8>> {
let report = self.report().await?;
// Generate Google's pprof format report.
let profile = report.pprof().context(ReportPprofSnafu)?;
let body = profile.encode_to_vec();
Ok(body)
}
}
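A minimal usage sketch of the new `Profiling` helper; the crate and method names come from the file above, while the surrounding `main` is illustrative only (the real `/v1/prof/cpu` HTTP wiring lives in the servers crate and is not shown here):

```rust
use std::time::Duration;

use common_pprof::Profiling;

#[tokio::main]
async fn main() -> common_pprof::Result<()> {
    // Sample at 99 Hz for 5 seconds, then write a flamegraph SVG.
    let profiling = Profiling::new(Duration::from_secs(5), 99);
    let svg = profiling.dump_flamegraph().await?;
    std::fs::write("/tmp/profile.svg", svg).expect("failed to write flamegraph");
    Ok(())
}
```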


@@ -29,10 +29,7 @@ pub enum Error {
"Failed to execute procedure due to external error, source: {}", "Failed to execute procedure due to external error, source: {}",
source source
))] ))]
External { External { source: BoxedError },
#[snafu(backtrace)]
source: BoxedError,
},
#[snafu(display("Loader {} is already registered", name))] #[snafu(display("Loader {} is already registered", name))]
LoaderConflict { name: String, location: Location }, LoaderConflict { name: String, location: Location },
@@ -52,7 +49,7 @@ pub enum Error {
#[snafu(display("Failed to put state, key: '{key}', source: {source}"))] #[snafu(display("Failed to put state, key: '{key}', source: {source}"))]
PutState { PutState {
key: String, key: String,
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },
@@ -65,14 +62,14 @@ pub enum Error {
#[snafu(display("Failed to delete keys: '{keys}', source: {source}"))] #[snafu(display("Failed to delete keys: '{keys}', source: {source}"))]
DeleteStates { DeleteStates {
keys: String, keys: String,
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },
#[snafu(display("Failed to list state, path: '{path}', source: {source}"))] #[snafu(display("Failed to list state, path: '{path}', source: {source}"))]
ListState { ListState {
path: String, path: String,
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },
@@ -83,10 +80,7 @@ pub enum Error {
}, },
#[snafu(display("Procedure exec failed, source: {}", source))] #[snafu(display("Procedure exec failed, source: {}", source))]
RetryLater { RetryLater { source: BoxedError },
#[snafu(backtrace)]
source: BoxedError,
},
#[snafu(display("Procedure panics, procedure_id: {}", procedure_id))] #[snafu(display("Procedure panics, procedure_id: {}", procedure_id))]
ProcedurePanic { procedure_id: ProcedureId }, ProcedurePanic { procedure_id: ProcedureId },


@@ -40,7 +40,7 @@ pub enum Error {
source source
))] ))]
UdfTempRecordBatch { UdfTempRecordBatch {
#[snafu(backtrace)] location: Location,
source: RecordbatchError, source: RecordbatchError,
}, },
@@ -65,19 +65,19 @@ pub enum Error {
#[snafu(display("Fail to cast scalar value into vector: {}", source))] #[snafu(display("Fail to cast scalar value into vector: {}", source))]
FromScalarValue { FromScalarValue {
#[snafu(backtrace)] location: Location,
source: DataTypeError, source: DataTypeError,
}, },
#[snafu(display("Fail to cast arrow array into vector: {}", source))] #[snafu(display("Fail to cast arrow array into vector: {}", source))]
FromArrowArray { FromArrowArray {
#[snafu(backtrace)] location: Location,
source: DataTypeError, source: DataTypeError,
}, },
#[snafu(display("Fail to cast arrow array into vector: {:?}, {}", data_type, source))] #[snafu(display("Fail to cast arrow array into vector: {:?}, {}", data_type, source))]
IntoVector { IntoVector {
#[snafu(backtrace)] location: Location,
source: DataTypeError, source: DataTypeError,
data_type: ArrowDatatype, data_type: ArrowDatatype,
}, },
@@ -93,7 +93,7 @@ pub enum Error {
#[snafu(display("Invalid input type: {}", err_msg))] #[snafu(display("Invalid input type: {}", err_msg))]
InvalidInputType { InvalidInputType {
#[snafu(backtrace)] location: Location,
source: DataTypeError, source: DataTypeError,
err_msg: String, err_msg: String,
}, },
@@ -120,19 +120,19 @@ pub enum Error {
source source
))] ))]
ConvertDfRecordBatchStream { ConvertDfRecordBatchStream {
#[snafu(backtrace)] location: Location,
source: common_recordbatch::error::Error, source: common_recordbatch::error::Error,
}, },
#[snafu(display("Failed to convert arrow schema, source: {}", source))] #[snafu(display("Failed to convert arrow schema, source: {}", source))]
ConvertArrowSchema { ConvertArrowSchema {
#[snafu(backtrace)] location: Location,
source: DataTypeError, source: DataTypeError,
}, },
#[snafu(display("Failed to execute physical plan, source: {}", source))] #[snafu(display("Failed to execute physical plan, source: {}", source))]
ExecutePhysicalPlan { ExecutePhysicalPlan {
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },
@@ -154,13 +154,13 @@ pub enum Error {
#[snafu(display("Query engine fail to cast value: {}", source))] #[snafu(display("Query engine fail to cast value: {}", source))]
ToScalarValue { ToScalarValue {
#[snafu(backtrace)] location: Location,
source: DataTypeError, source: DataTypeError,
}, },
#[snafu(display("Failed to get scalar vector, {}", source))] #[snafu(display("Failed to get scalar vector, {}", source))]
GetScalarVector { GetScalarVector {
#[snafu(backtrace)] location: Location,
source: DataTypeError, source: DataTypeError,
}, },
@@ -188,9 +188,9 @@ impl ErrorExt for Error {
Error::InvalidInputType { source, .. } Error::InvalidInputType { source, .. }
| Error::IntoVector { source, .. } | Error::IntoVector { source, .. }
| Error::FromScalarValue { source } | Error::FromScalarValue { source, .. }
| Error::ConvertArrowSchema { source } | Error::ConvertArrowSchema { source, .. }
| Error::FromArrowArray { source } => source.status_code(), | Error::FromArrowArray { source, .. } => source.status_code(),
Error::ExecuteRepeatedly { .. } | Error::GeneralDataFusion { .. } => { Error::ExecuteRepeatedly { .. } | Error::GeneralDataFusion { .. } => {
StatusCode::Unexpected StatusCode::Unexpected
@@ -201,7 +201,7 @@ impl ErrorExt for Error {
| Error::InvalidFuncArgs { .. } => StatusCode::InvalidArguments, | Error::InvalidFuncArgs { .. } => StatusCode::InvalidArguments,
Error::ConvertDfRecordBatchStream { source, .. } => source.status_code(), Error::ConvertDfRecordBatchStream { source, .. } => source.status_code(),
Error::ExecutePhysicalPlan { source } => source.status_code(), Error::ExecutePhysicalPlan { source, .. } => source.status_code(),
} }
} }
@@ -215,9 +215,3 @@ impl From<Error> for DataFusionError {
DataFusionError::External(Box::new(e)) DataFusionError::External(Box::new(e))
} }
} }
impl From<BoxedError> for Error {
fn from(source: BoxedError) -> Self {
Error::ExecutePhysicalPlan { source }
}
}


@@ -172,7 +172,6 @@ impl DfAccumulator for DfAccumulatorAdaptor {
} }
fn size(&self) -> usize { fn size(&self) -> usize {
// TODO(LFC): Implement new "size" method for Accumulator.
0 0
} }
} }


@@ -194,7 +194,6 @@ impl DfPhysicalPlan for DfPhysicalPlanAdapter {
} }
fn statistics(&self) -> Statistics { fn statistics(&self) -> Statistics {
// TODO(LFC): impl statistics
Statistics::default() Statistics::default()
} }
} }


@@ -225,6 +225,7 @@ mod test {
use datatypes::prelude::ConcreteDataType; use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema; use datatypes::schema::ColumnSchema;
use datatypes::vectors::Int32Vector; use datatypes::vectors::Int32Vector;
use snafu::IntoError;
use super::*; use super::*;
use crate::RecordBatches; use crate::RecordBatches;
@@ -296,9 +297,8 @@ mod test {
let poll_err_stream = new_future_stream(Ok(vec![ let poll_err_stream = new_future_stream(Ok(vec![
Ok(batch1.clone()), Ok(batch1.clone()),
Err(error::Error::External { Err(error::ExternalSnafu
source: BoxedError::new(MockError::new(StatusCode::Unknown)), .into_error(BoxedError::new(MockError::new(StatusCode::Unknown)))),
}),
])); ]));
let adapter = AsyncRecordBatchStreamAdapter::new(schema.clone(), poll_err_stream); let adapter = AsyncRecordBatchStreamAdapter::new(schema.clone(), poll_err_stream);
let result = RecordBatches::try_collect(Box::pin(adapter)).await; let result = RecordBatches::try_collect(Box::pin(adapter)).await;
@@ -307,9 +307,9 @@ mod test {
"Failed to poll stream, source: External error: External error, source: Unknown" "Failed to poll stream, source: External error: External error, source: Unknown"
); );
let failed_to_init_stream = new_future_stream(Err(error::Error::External { let failed_to_init_stream =
source: BoxedError::new(MockError::new(StatusCode::Internal)), new_future_stream(Err(error::ExternalSnafu
})); .into_error(BoxedError::new(MockError::new(StatusCode::Internal)))));
let adapter = AsyncRecordBatchStreamAdapter::new(schema.clone(), failed_to_init_stream); let adapter = AsyncRecordBatchStreamAdapter::new(schema.clone(), failed_to_init_stream);
let result = RecordBatches::try_collect(Box::pin(adapter)).await; let result = RecordBatches::try_collect(Box::pin(adapter)).await;
assert_eq!( assert_eq!(
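With the `External` variant now carrying an implicit `location` field, the tests above construct it through the generated context selector's `into_error` instead of a struct literal, so the location is filled in automatically. A stand-alone sketch of that construction pattern with stand-in types (not the recordbatch error itself):

```rust
use snafu::{prelude::*, IntoError, Location};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("External error, source: {}", source))]
    External {
        location: Location,
        source: std::io::Error,
    },
}

fn make_external_error() -> Error {
    // `ExternalSnafu` is the generated context selector; `into_error`
    // attaches the source and captures the implicit location.
    ExternalSnafu.into_error(std::io::Error::new(std::io::ErrorKind::Other, "boom"))
}

fn main() {
    println!("{}", make_external_error());
}
```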


@@ -33,13 +33,13 @@ pub enum Error {
#[snafu(display("Data types error, source: {}", source))] #[snafu(display("Data types error, source: {}", source))]
DataTypes { DataTypes {
#[snafu(backtrace)] location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
#[snafu(display("External error, source: {}", source))] #[snafu(display("External error, source: {}", source))]
External { External {
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },
@@ -99,7 +99,7 @@ pub enum Error {
CastVector { CastVector {
from_type: ConcreteDataType, from_type: ConcreteDataType,
to_type: ConcreteDataType, to_type: ConcreteDataType,
#[snafu(backtrace)] location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
} }
@@ -117,7 +117,7 @@ impl ErrorExt for Error {
| Error::ColumnNotExists { .. } | Error::ColumnNotExists { .. }
| Error::ProjectArrowRecordBatch { .. } => StatusCode::Internal, | Error::ProjectArrowRecordBatch { .. } => StatusCode::Internal,
Error::External { source } => source.status_code(), Error::External { source, .. } => source.status_code(),
Error::SchemaConversion { source, .. } | Error::CastVector { source, .. } => { Error::SchemaConversion { source, .. } | Error::CastVector { source, .. } => {
source.status_code() source.status_code()


@@ -74,7 +74,7 @@ pub enum Error {
#[snafu(display("Internal error: {}", source))] #[snafu(display("Internal error: {}", source))]
Internal { Internal {
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },
@@ -96,14 +96,14 @@ pub enum Error {
#[snafu(display("Failed to convert DataFusion schema, source: {}", source))] #[snafu(display("Failed to convert DataFusion schema, source: {}", source))]
ConvertDfSchema { ConvertDfSchema {
#[snafu(backtrace)] location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
#[snafu(display("Unable to resolve table: {table_name}, error: {source}"))] #[snafu(display("Unable to resolve table: {table_name}, error: {source}"))]
ResolveTable { ResolveTable {
table_name: String, table_name: String,
#[snafu(backtrace)] location: Location,
source: catalog::error::Error, source: catalog::error::Error,
}, },
@@ -141,7 +141,7 @@ impl ErrorExt for Error {
| Error::Internal { .. } | Error::Internal { .. }
| Error::EncodeDfPlan { .. } | Error::EncodeDfPlan { .. }
| Error::DecodeDfPlan { .. } => StatusCode::Internal, | Error::DecodeDfPlan { .. } => StatusCode::Internal,
Error::ConvertDfSchema { source } => source.status_code(), Error::ConvertDfSchema { source, .. } => source.status_code(),
Error::ResolveTable { source, .. } => source.status_code(), Error::ResolveTable { source, .. } => source.status_code(),
} }
} }


@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
use std::fmt::{Debug, Display, Formatter};
use crate::timestamp::TimeUnit; use crate::timestamp::TimeUnit;
use crate::timestamp_millis::TimestampMillis; use crate::timestamp_millis::TimestampMillis;
use crate::Timestamp; use crate::Timestamp;
@@ -193,6 +195,38 @@ impl<T: PartialOrd> GenericRange<T> {
pub type TimestampRange = GenericRange<Timestamp>; pub type TimestampRange = GenericRange<Timestamp>;
impl Display for TimestampRange {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let s = match (&self.start, &self.end) {
(Some(start), Some(end)) => {
format!(
"TimestampRange{{[{}{},{}{})}}",
start.value(),
start.unit().short_name(),
end.value(),
end.unit().short_name()
)
}
(Some(start), None) => {
format!(
"TimestampRange{{[{}{},#)}}",
start.value(),
start.unit().short_name()
)
}
(None, Some(end)) => {
format!(
"TimestampRange{{[#,{}{})}}",
end.value(),
end.unit().short_name()
)
}
(None, None) => "TimestampRange{{[#,#)}}".to_string(),
};
f.write_str(&s)
}
}
impl TimestampRange { impl TimestampRange {
/// Create a TimestampRange with optional inclusive end timestamp. /// Create a TimestampRange with optional inclusive end timestamp.
/// If end timestamp is present and is less than start timestamp, this method will return /// If end timestamp is present and is less than start timestamp, this method will return


@@ -336,6 +336,15 @@ impl TimeUnit {
TimeUnit::Nanosecond => 1, TimeUnit::Nanosecond => 1,
} }
} }
pub(crate) fn short_name(&self) -> &'static str {
match self {
TimeUnit::Second => "s",
TimeUnit::Millisecond => "ms",
TimeUnit::Microsecond => "us",
TimeUnit::Nanosecond => "ns",
}
}
} }
impl PartialOrd for Timestamp { impl PartialOrd for Timestamp {
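Taken together, the two hunks above render a range as the raw timestamp value followed by the unit's short name, with `#` standing in for an unbounded side. A stand-alone illustration of the resulting format (this re-implements the format string locally and does not call the `common_time` types):

```rust
// Mirrors the Display format introduced above, with (value, short unit name)
// pairs standing in for Timestamp values.
fn render(start: Option<(i64, &str)>, end: Option<(i64, &str)>) -> String {
    match (start, end) {
        (Some((sv, su)), Some((ev, eu))) => format!("TimestampRange{{[{sv}{su},{ev}{eu})}}"),
        (Some((sv, su)), None) => format!("TimestampRange{{[{sv}{su},#)}}"),
        (None, Some((ev, eu))) => format!("TimestampRange{{[#,{ev}{eu})}}"),
        (None, None) => "TimestampRange{[#,#)}".to_string(),
    }
}

fn main() {
    assert_eq!(
        render(Some((1_000, "ms")), Some((2_000, "ms"))),
        "TimestampRange{[1000ms,2000ms)}"
    );
    assert_eq!(render(Some((5, "s")), None), "TimestampRange{[5s,#)}");
}
```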


@@ -221,7 +221,7 @@ impl Default for RegionManifestConfig {
fn default() -> Self { fn default() -> Self {
Self { Self {
checkpoint_margin: Some(10u16), checkpoint_margin: Some(10u16),
gc_duration: Some(Duration::from_secs(30)), gc_duration: Some(Duration::from_secs(600)),
checkpoint_on_startup: false, checkpoint_on_startup: false,
compress: false, compress: false,
} }
@@ -340,8 +340,6 @@ pub struct DatanodeOptions {
pub rpc_addr: String, pub rpc_addr: String,
pub rpc_hostname: Option<String>, pub rpc_hostname: Option<String>,
pub rpc_runtime_size: usize, pub rpc_runtime_size: usize,
pub mysql_addr: String,
pub mysql_runtime_size: usize,
pub http_opts: HttpOptions, pub http_opts: HttpOptions,
pub meta_client_options: Option<MetaClientOptions>, pub meta_client_options: Option<MetaClientOptions>,
pub wal: WalConfig, pub wal: WalConfig,
@@ -359,8 +357,6 @@ impl Default for DatanodeOptions {
rpc_addr: "127.0.0.1:3001".to_string(), rpc_addr: "127.0.0.1:3001".to_string(),
rpc_hostname: None, rpc_hostname: None,
rpc_runtime_size: 8, rpc_runtime_size: 8,
mysql_addr: "127.0.0.1:4406".to_string(),
mysql_runtime_size: 2,
http_opts: HttpOptions::default(), http_opts: HttpOptions::default(),
meta_client_options: None, meta_client_options: None,
wal: WalConfig::default(), wal: WalConfig::default(),


@@ -27,14 +27,14 @@ use table::error::Error as TableError;
pub enum Error { pub enum Error {
#[snafu(display("Failed to access catalog, source: {}", source))] #[snafu(display("Failed to access catalog, source: {}", source))]
AccessCatalog { AccessCatalog {
#[snafu(backtrace)] location: Location,
source: catalog::error::Error, source: catalog::error::Error,
}, },
#[snafu(display("Failed to deregister table: {}, source: {}", table_name, source))] #[snafu(display("Failed to deregister table: {}, source: {}", table_name, source))]
DeregisterTable { DeregisterTable {
table_name: String, table_name: String,
#[snafu(backtrace)] location: Location,
source: catalog::error::Error, source: catalog::error::Error,
}, },
@@ -48,7 +48,7 @@ pub enum Error {
#[snafu(display("Failed to open table: {}, source: {}", table_name, source))] #[snafu(display("Failed to open table: {}, source: {}", table_name, source))]
OpenTable { OpenTable {
table_name: String, table_name: String,
#[snafu(backtrace)] location: Location,
source: TableError, source: TableError,
}, },
@@ -68,7 +68,7 @@ pub enum Error {
CloseTable { CloseTable {
table_name: String, table_name: String,
region_numbers: Vec<RegionNumber>, region_numbers: Vec<RegionNumber>,
#[snafu(backtrace)] location: Location,
source: TableError, source: TableError,
}, },
@@ -93,31 +93,31 @@ pub enum Error {
#[snafu(display("Failed to execute sql, source: {}", source))] #[snafu(display("Failed to execute sql, source: {}", source))]
ExecuteSql { ExecuteSql {
#[snafu(backtrace)] location: Location,
source: query::error::Error, source: query::error::Error,
}, },
#[snafu(display("Failed to plan statement, source: {}", source))] #[snafu(display("Failed to plan statement, source: {}", source))]
PlanStatement { PlanStatement {
#[snafu(backtrace)] location: Location,
source: query::error::Error, source: query::error::Error,
}, },
#[snafu(display("Failed to execute statement, source: {}", source))] #[snafu(display("Failed to execute statement, source: {}", source))]
ExecuteStatement { ExecuteStatement {
#[snafu(backtrace)] location: Location,
source: query::error::Error, source: query::error::Error,
}, },
#[snafu(display("Failed to execute logical plan, source: {}", source))] #[snafu(display("Failed to execute logical plan, source: {}", source))]
ExecuteLogicalPlan { ExecuteLogicalPlan {
#[snafu(backtrace)] location: Location,
source: query::error::Error, source: query::error::Error,
}, },
#[snafu(display("Failed to decode logical plan, source: {}", source))] #[snafu(display("Failed to decode logical plan, source: {}", source))]
DecodeLogicalPlan { DecodeLogicalPlan {
#[snafu(backtrace)] location: Location,
source: substrait::error::Error, source: substrait::error::Error,
}, },
@@ -126,7 +126,7 @@ pub enum Error {
#[snafu(display("Failed to create catalog list, source: {}", source))] #[snafu(display("Failed to create catalog list, source: {}", source))]
NewCatalog { NewCatalog {
#[snafu(backtrace)] location: Location,
source: catalog::error::Error, source: catalog::error::Error,
}, },
@@ -139,21 +139,21 @@ pub enum Error {
#[snafu(display("Failed to create table: {}, source: {}", table_name, source))] #[snafu(display("Failed to create table: {}, source: {}", table_name, source))]
CreateTable { CreateTable {
table_name: String, table_name: String,
#[snafu(backtrace)] location: Location,
source: TableError, source: TableError,
}, },
#[snafu(display("Failed to drop table {}, source: {}", table_name, source))] #[snafu(display("Failed to drop table {}, source: {}", table_name, source))]
DropTable { DropTable {
table_name: String, table_name: String,
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },
#[snafu(display("Table engine not found: {}, source: {}", engine_name, source))] #[snafu(display("Table engine not found: {}, source: {}", engine_name, source))]
TableEngineNotFound { TableEngineNotFound {
engine_name: String, engine_name: String,
#[snafu(backtrace)] location: Location,
source: table::error::Error, source: table::error::Error,
}, },
@@ -164,7 +164,7 @@ pub enum Error {
))] ))]
EngineProcedureNotFound { EngineProcedureNotFound {
engine_name: String, engine_name: String,
#[snafu(backtrace)] location: Location,
source: table::error::Error, source: table::error::Error,
}, },
@@ -192,7 +192,7 @@ pub enum Error {
#[snafu(display("Failed to parse sql value, source: {}", source))] #[snafu(display("Failed to parse sql value, source: {}", source))]
ParseSqlValue { ParseSqlValue {
#[snafu(backtrace)] location: Location,
source: sql::error::Error, source: sql::error::Error,
}, },
@@ -202,7 +202,7 @@ pub enum Error {
#[snafu(display("Failed to insert value to table: {}, source: {}", table_name, source))] #[snafu(display("Failed to insert value to table: {}, source: {}", table_name, source))]
Insert { Insert {
table_name: String, table_name: String,
#[snafu(backtrace)] location: Location,
source: TableError, source: TableError,
}, },
@@ -213,20 +213,20 @@ pub enum Error {
))] ))]
Delete { Delete {
table_name: String, table_name: String,
#[snafu(backtrace)] location: Location,
source: TableError, source: TableError,
}, },
#[snafu(display("Failed to flush table: {}, source: {}", table_name, source))] #[snafu(display("Failed to flush table: {}, source: {}", table_name, source))]
FlushTable { FlushTable {
table_name: String, table_name: String,
#[snafu(backtrace)] location: Location,
source: TableError, source: TableError,
}, },
#[snafu(display("Failed to start server, source: {}", source))] #[snafu(display("Failed to start server, source: {}", source))]
StartServer { StartServer {
#[snafu(backtrace)] location: Location,
source: servers::error::Error, source: servers::error::Error,
}, },
@@ -250,8 +250,8 @@ pub enum Error {
#[snafu(display("Failed to open log store, source: {}", source))] #[snafu(display("Failed to open log store, source: {}", source))]
OpenLogStore { OpenLogStore {
#[snafu(backtrace)] location: Location,
source: log_store::error::Error, source: Box<log_store::error::Error>,
}, },
#[snafu(display("Failed to init backend, source: {}", source))] #[snafu(display("Failed to init backend, source: {}", source))]
@@ -262,7 +262,7 @@ pub enum Error {
#[snafu(display("Runtime resource error, source: {}", source))] #[snafu(display("Runtime resource error, source: {}", source))]
RuntimeResource { RuntimeResource {
#[snafu(backtrace)] location: Location,
source: common_runtime::error::Error, source: common_runtime::error::Error,
}, },
@@ -289,7 +289,7 @@ pub enum Error {
#[snafu(display("Failed to register a new schema, source: {}", source))] #[snafu(display("Failed to register a new schema, source: {}", source))]
RegisterSchema { RegisterSchema {
#[snafu(backtrace)] location: Location,
source: catalog::error::Error, source: catalog::error::Error,
}, },
@@ -298,25 +298,25 @@ pub enum Error {
#[snafu(display("Failed to convert alter expr to request: {}", source))] #[snafu(display("Failed to convert alter expr to request: {}", source))]
AlterExprToRequest { AlterExprToRequest {
#[snafu(backtrace)] location: Location,
source: common_grpc_expr::error::Error, source: common_grpc_expr::error::Error,
}, },
#[snafu(display("Failed to convert create expr to request: {}", source))] #[snafu(display("Failed to convert create expr to request: {}", source))]
CreateExprToRequest { CreateExprToRequest {
#[snafu(backtrace)] location: Location,
source: common_grpc_expr::error::Error, source: common_grpc_expr::error::Error,
}, },
#[snafu(display("Failed to convert delete expr to request: {}", source))] #[snafu(display("Failed to convert delete expr to request: {}", source))]
DeleteExprToRequest { DeleteExprToRequest {
#[snafu(backtrace)] location: Location,
source: common_grpc_expr::error::Error, source: common_grpc_expr::error::Error,
}, },
#[snafu(display("Failed to parse SQL, source: {}", source))] #[snafu(display("Failed to parse SQL, source: {}", source))]
ParseSql { ParseSql {
#[snafu(backtrace)] location: Location,
source: sql::error::Error, source: sql::error::Error,
}, },
@@ -327,38 +327,38 @@ pub enum Error {
))] ))]
ParseTimestamp { ParseTimestamp {
raw: String, raw: String,
#[snafu(backtrace)] location: Location,
source: common_time::error::Error, source: common_time::error::Error,
}, },
#[snafu(display("Failed to prepare immutable table: {}", source))] #[snafu(display("Failed to prepare immutable table: {}", source))]
PrepareImmutableTable { PrepareImmutableTable {
#[snafu(backtrace)] location: Location,
source: query::error::Error, source: query::error::Error,
}, },
#[snafu(display("Failed to access catalog, source: {}", source))] #[snafu(display("Failed to access catalog, source: {}", source))]
Catalog { Catalog {
#[snafu(backtrace)] location: Location,
source: catalog::error::Error, source: catalog::error::Error,
}, },
#[snafu(display("Failed to find table {} from catalog, source: {}", table_name, source))] #[snafu(display("Failed to find table {} from catalog, source: {}", table_name, source))]
FindTable { FindTable {
table_name: String, table_name: String,
#[snafu(backtrace)] location: Location,
source: catalog::error::Error, source: catalog::error::Error,
}, },
#[snafu(display("Failed to initialize meta client, source: {}", source))] #[snafu(display("Failed to initialize meta client, source: {}", source))]
MetaClientInit { MetaClientInit {
#[snafu(backtrace)] location: Location,
source: meta_client::error::Error, source: meta_client::error::Error,
}, },
#[snafu(display("Failed to insert data, source: {}", source))] #[snafu(display("Failed to insert data, source: {}", source))]
InsertData { InsertData {
#[snafu(backtrace)] location: Location,
source: common_grpc_expr::error::Error, source: common_grpc_expr::error::Error,
}, },
@@ -369,7 +369,7 @@ pub enum Error {
#[snafu(display("Failed to bump table id, source: {}", source))] #[snafu(display("Failed to bump table id, source: {}", source))]
BumpTableId { BumpTableId {
#[snafu(backtrace)] location: Location,
source: table::error::Error, source: table::error::Error,
}, },
@@ -392,7 +392,7 @@ pub enum Error {
))] ))]
ColumnDefaultValue { ColumnDefaultValue {
column: String, column: String,
#[snafu(backtrace)] location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
@@ -404,45 +404,45 @@ pub enum Error {
#[snafu(display("Unrecognized table option: {}", source))] #[snafu(display("Unrecognized table option: {}", source))]
UnrecognizedTableOption { UnrecognizedTableOption {
#[snafu(backtrace)] location: Location,
source: table::error::Error, source: table::error::Error,
}, },
#[snafu(display("Failed to recover procedure, source: {}", source))] #[snafu(display("Failed to recover procedure, source: {}", source))]
RecoverProcedure { RecoverProcedure {
#[snafu(backtrace)] location: Location,
source: common_procedure::error::Error, source: common_procedure::error::Error,
}, },
#[snafu(display("Failed to submit procedure {}, source: {}", procedure_id, source))] #[snafu(display("Failed to submit procedure {}, source: {}", procedure_id, source))]
SubmitProcedure { SubmitProcedure {
procedure_id: ProcedureId, procedure_id: ProcedureId,
#[snafu(backtrace)] location: Location,
source: common_procedure::error::Error, source: common_procedure::error::Error,
}, },
#[snafu(display("Failed to wait procedure {} done, source: {}", procedure_id, source))] #[snafu(display("Failed to wait procedure {} done, source: {}", procedure_id, source))]
WaitProcedure { WaitProcedure {
procedure_id: ProcedureId, procedure_id: ProcedureId,
#[snafu(backtrace)] location: Location,
source: common_procedure::error::Error, source: common_procedure::error::Error,
}, },
#[snafu(display("Failed to close table engine, source: {}", source))] #[snafu(display("Failed to close table engine, source: {}", source))]
CloseTableEngine { CloseTableEngine {
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },
#[snafu(display("Failed to shutdown server, source: {}", source))] #[snafu(display("Failed to shutdown server, source: {}", source))]
ShutdownServer { ShutdownServer {
#[snafu(backtrace)] location: Location,
source: servers::error::Error, source: servers::error::Error,
}, },
#[snafu(display("Failed to shutdown instance, source: {}", source))] #[snafu(display("Failed to shutdown instance, source: {}", source))]
ShutdownInstance { ShutdownInstance {
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },
@@ -487,15 +487,15 @@ impl ErrorExt for Error {
fn status_code(&self) -> StatusCode { fn status_code(&self) -> StatusCode {
use Error::*; use Error::*;
match self { match self {
ExecuteSql { source } ExecuteSql { source, .. }
| PlanStatement { source } | PlanStatement { source, .. }
| ExecuteStatement { source } | ExecuteStatement { source, .. }
| ExecuteLogicalPlan { source } => source.status_code(), | ExecuteLogicalPlan { source, .. } => source.status_code(),
HandleHeartbeatResponse { source, .. } => source.status_code(), HandleHeartbeatResponse { source, .. } => source.status_code(),
DecodeLogicalPlan { source } => source.status_code(), DecodeLogicalPlan { source, .. } => source.status_code(),
NewCatalog { source } | RegisterSchema { source } => source.status_code(), NewCatalog { source, .. } | RegisterSchema { source, .. } => source.status_code(),
FindTable { source, .. } => source.status_code(), FindTable { source, .. } => source.status_code(),
CreateTable { source, .. } => source.status_code(), CreateTable { source, .. } => source.status_code(),
DropTable { source, .. } => source.status_code(), DropTable { source, .. } => source.status_code(),
@@ -512,9 +512,9 @@ impl ErrorExt for Error {
ParseSqlValue { source, .. } | ParseSql { source, .. } => source.status_code(), ParseSqlValue { source, .. } | ParseSql { source, .. } => source.status_code(),
AlterExprToRequest { source, .. } AlterExprToRequest { source, .. }
| CreateExprToRequest { source } | CreateExprToRequest { source, .. }
| DeleteExprToRequest { source } | DeleteExprToRequest { source, .. }
| InsertData { source } => source.status_code(), | InsertData { source, .. } => source.status_code(),
ColumnValuesNumberMismatch { .. } ColumnValuesNumberMismatch { .. }
| InvalidSql { .. } | InvalidSql { .. }
@@ -559,13 +559,13 @@ impl ErrorExt for Error {
| CloseTableEngine { .. } | CloseTableEngine { .. }
| JoinTask { .. } => StatusCode::Internal, | JoinTask { .. } => StatusCode::Internal,
StartServer { source } StartServer { source, .. }
| ShutdownServer { source } | ShutdownServer { source, .. }
| WaitForGrpcServing { source, .. } => source.status_code(), | WaitForGrpcServing { source, .. } => source.status_code(),
InitBackend { .. } => StatusCode::StorageUnavailable, InitBackend { .. } => StatusCode::StorageUnavailable,
OpenLogStore { source } => source.status_code(), OpenLogStore { source, .. } => source.status_code(),
RuntimeResource { .. } => StatusCode::RuntimeResourcesExhausted, RuntimeResource { .. } => StatusCode::RuntimeResourcesExhausted,
MetaClientInit { source, .. } => source.status_code(), MetaClientInit { source, .. } => source.status_code(),
TableIdProviderNotFound { .. } => StatusCode::Unsupported, TableIdProviderNotFound { .. } => StatusCode::Unsupported,
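
All of the error-enum hunks in this compare apply the same mechanical change: `#[snafu(backtrace)]` is dropped in favour of an explicit `location: Location` field, and every `status_code` match arm that destructures such a variant gains `..` so the extra field is ignored. A minimal sketch of the resulting shape, using hypothetical variant and source types rather than the crates' real ones:

```rust
use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
pub enum Error {
    // After the refactor: an explicit `location` field replaces `#[snafu(backtrace)]`.
    #[snafu(display("Failed to open log store, source: {}", source))]
    OpenLogStore {
        location: Location,
        source: std::io::Error, // hypothetical source type, for illustration only
    },
}

impl Error {
    // Destructuring with `{ source, .. }` keeps compiling even though the
    // variant now carries the additional `location` field.
    pub fn status_code(&self) -> &'static str {
        match self {
            Error::OpenLogStore { source, .. } => match source.kind() {
                std::io::ErrorKind::NotFound => "StorageUnavailable",
                _ => "Internal",
            },
        }
    }
}
```

In the real crates `status_code` delegates to `source.status_code()` and returns the shared `StatusCode` type; the string return here is only a stand-in.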

View File

@@ -23,7 +23,7 @@ use common_meta::heartbeat::handler::{
}; };
use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MailboxRef}; use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MailboxRef};
use common_meta::heartbeat::utils::outgoing_message_to_mailbox_message; use common_meta::heartbeat::utils::outgoing_message_to_mailbox_message;
use common_telemetry::{error, info, trace, warn}; use common_telemetry::{debug, error, info, trace, warn};
use meta_client::client::{HeartbeatSender, MetaClient}; use meta_client::client::{HeartbeatSender, MetaClient};
use snafu::ResultExt; use snafu::ResultExt;
use tokio::sync::mpsc; use tokio::sync::mpsc;
@@ -199,6 +199,7 @@ impl HeartbeatTask {
} }
}; };
if let Some(req) = req { if let Some(req) = req {
debug!("Sending heartbeat request: {:?}", req);
if let Err(e) = tx.send(req).await { if let Err(e) = tx.send(req).await {
error!("Failed to send heartbeat to metasrv, error: {:?}", e); error!("Failed to send heartbeat to metasrv, error: {:?}", e);
match Self::create_streams( match Self::create_streams(

View File

@@ -421,6 +421,7 @@ pub(crate) async fn create_log_store(
let logstore = RaftEngineLogStore::try_new(log_config) let logstore = RaftEngineLogStore::try_new(log_config)
.await .await
.map_err(Box::new)
.context(OpenLogStoreSnafu)?; .context(OpenLogStoreSnafu)?;
Ok(logstore) Ok(logstore)
} }
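
The extra `.map_err(Box::new)` above suggests the corresponding `OpenLogStore` variant now stores its source boxed (a common way to keep the error enum small), so the concrete error has to be adapted to that boxed type before `.context(...)` can build the variant. A rough, self-contained sketch of that shape with placeholder error types rather than the real log-store ones:

```rust
use snafu::{Location, ResultExt, Snafu};

// Placeholder for the real raft-engine log store error.
#[derive(Debug, Snafu)]
pub enum LogStoreError {
    #[snafu(display("raft engine unavailable"))]
    Unavailable,
}

#[derive(Debug, Snafu)]
pub enum Error {
    // Boxing the source keeps the outer enum small; callers then adapt the
    // concrete error with `map_err(Box::new)` before calling `context`.
    #[snafu(display("Failed to open log store, source: {}", source))]
    OpenLogStore {
        location: Location,
        source: Box<LogStoreError>,
    },
}

fn try_new_log_store() -> Result<(), LogStoreError> {
    Err(LogStoreError::Unavailable)
}

pub fn create_log_store() -> Result<(), Error> {
    try_new_log_store()
        .map_err(Box::new) // Box<LogStoreError> is what the variant expects
        .context(OpenLogStoreSnafu)?;
    Ok(())
}
```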

View File

@@ -115,13 +115,13 @@ pub enum Error {
source source
))] ))]
ConvertRaw { ConvertRaw {
#[snafu(backtrace)] location: Location,
source: table::metadata::ConvertError, source: table::metadata::ConvertError,
}, },
#[snafu(display("Invalid schema, source: {}", source))] #[snafu(display("Invalid schema, source: {}", source))]
InvalidRawSchema { InvalidRawSchema {
#[snafu(backtrace)] location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
@@ -130,7 +130,7 @@ pub enum Error {
#[snafu(display("Failed to build backend, source: {}", source))] #[snafu(display("Failed to build backend, source: {}", source))]
BuildBackend { BuildBackend {
#[snafu(backtrace)] location: Location,
source: common_datasource::error::Error, source: common_datasource::error::Error,
}, },
@@ -154,13 +154,13 @@ pub enum Error {
#[snafu(display("Failed to build stream adapter: {}", source))] #[snafu(display("Failed to build stream adapter: {}", source))]
BuildStreamAdapter { BuildStreamAdapter {
#[snafu(backtrace)] location: Location,
source: common_recordbatch::error::Error, source: common_recordbatch::error::Error,
}, },
#[snafu(display("Failed to parse file format: {}", source))] #[snafu(display("Failed to parse file format: {}", source))]
ParseFileFormat { ParseFileFormat {
#[snafu(backtrace)] location: Location,
source: common_datasource::error::Error, source: common_datasource::error::Error,
}, },

View File

@@ -20,8 +20,7 @@ use common_meta::heartbeat::handler::{
}; };
use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MailboxRef, OutgoingMessage}; use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MailboxRef, OutgoingMessage};
use common_meta::heartbeat::utils::outgoing_message_to_mailbox_message; use common_meta::heartbeat::utils::outgoing_message_to_mailbox_message;
use common_telemetry::tracing::trace; use common_telemetry::{debug, error, info};
use common_telemetry::{error, info};
use meta_client::client::{HeartbeatSender, HeartbeatStream, MetaClient}; use meta_client::client::{HeartbeatSender, HeartbeatStream, MetaClient};
use snafu::ResultExt; use snafu::ResultExt;
use tokio::sync::mpsc; use tokio::sync::mpsc;
@@ -83,7 +82,7 @@ impl HeartbeatTask {
loop { loop {
match resp_stream.message().await { match resp_stream.message().await {
Ok(Some(resp)) => { Ok(Some(resp)) => {
trace!("Received a heartbeat response: {:?}", resp); debug!("Receiving heartbeat response: {:?}", resp);
let ctx = HeartbeatResponseHandlerContext::new(mailbox.clone(), resp); let ctx = HeartbeatResponseHandlerContext::new(mailbox.clone(), resp);
if let Err(e) = capture_self.handle_response(ctx) { if let Err(e) = capture_self.handle_response(ctx) {
error!(e; "Error while handling heartbeat response"); error!(e; "Error while handling heartbeat response");
@@ -92,7 +91,6 @@ impl HeartbeatTask {
Ok(None) => break, Ok(None) => break,
Err(e) => { Err(e) => {
error!(e; "Occur error while reading heartbeat response"); error!(e; "Occur error while reading heartbeat response");
capture_self capture_self
.start_with_retry(Duration::from_secs(retry_interval)) .start_with_retry(Duration::from_secs(retry_interval))
.await; .await;
@@ -148,7 +146,7 @@ impl HeartbeatTask {
error!(e; "Failed to send heartbeat to metasrv"); error!(e; "Failed to send heartbeat to metasrv");
break; break;
} else { } else {
trace!("Send a heartbeat request to metasrv, content: {:?}", req); debug!("Send a heartbeat request to metasrv, content: {:?}", req);
} }
} }
} }

View File

@@ -285,9 +285,6 @@ impl Instance {
requests: InsertRequests, requests: InsertRequests,
ctx: QueryContextRef, ctx: QueryContextRef,
) -> Result<Output> { ) -> Result<Output> {
// TODO(LFC): Optimize concurrent table creation and table alteration.
// Currently table creation is guarded by a distributed lock in Metasrv. However, table
// alteration is not. We should all switch to procedures in Metasrv.
let _ = future::join_all( let _ = future::join_all(
requests requests
.inserts .inserts
@@ -563,6 +560,7 @@ impl PromHandler for Instance {
let stmt = QueryLanguageParser::parse_promql(query).with_context(|_| ParsePromQLSnafu { let stmt = QueryLanguageParser::parse_promql(query).with_context(|_| ParsePromQLSnafu {
query: query.clone(), query: query.clone(),
})?; })?;
self.statement_executor self.statement_executor
.execute_stmt(stmt, query_ctx) .execute_stmt(stmt, query_ctx)
.await .await

View File

@@ -598,7 +598,6 @@ impl DistInstance {
Ok(Output::AffectedRows(affected_rows as usize)) Ok(Output::AffectedRows(affected_rows as usize))
} }
// TODO(LFC): Like insertions above, refactor GRPC deletion impl here.
async fn handle_dist_delete( async fn handle_dist_delete(
&self, &self,
request: DeleteRequest, request: DeleteRequest,
@@ -662,8 +661,6 @@ impl GrpcQueryHandler for DistInstance {
match expr { match expr {
DdlExpr::CreateDatabase(expr) => self.handle_create_database(expr, ctx).await, DdlExpr::CreateDatabase(expr) => self.handle_create_database(expr, ctx).await,
DdlExpr::CreateTable(mut expr) => { DdlExpr::CreateTable(mut expr) => {
// TODO(LFC): Support creating distributed table through GRPC interface.
// Currently only SQL supports it; how to design the fields in CreateTableExpr?
let _ = self.create_table(&mut expr, None).await; let _ = self.create_table(&mut expr, None).await;
Ok(Output::AffectedRows(0)) Ok(Output::AffectedRows(0))
} }

View File

@@ -23,13 +23,13 @@ use snafu::Location;
pub enum Error { pub enum Error {
#[snafu(display("Failed to start log store gc task, source: {}", source))] #[snafu(display("Failed to start log store gc task, source: {}", source))]
StartGcTask { StartGcTask {
#[snafu(backtrace)] location: Location,
source: RuntimeError, source: RuntimeError,
}, },
#[snafu(display("Failed to stop log store gc task, source: {}", source))] #[snafu(display("Failed to stop log store gc task, source: {}", source))]
StopGcTask { StopGcTask {
#[snafu(backtrace)] location: Location,
source: RuntimeError, source: RuntimeError,
}, },

View File

@@ -35,7 +35,7 @@ pub enum Error {
#[snafu(display("Failed to create gRPC channel, source: {}", source))] #[snafu(display("Failed to create gRPC channel, source: {}", source))]
CreateChannel { CreateChannel {
#[snafu(backtrace)] location: Location,
source: common_grpc::error::Error, source: common_grpc::error::Error,
}, },
@@ -50,19 +50,19 @@ pub enum Error {
#[snafu(display("Invalid response header, source: {}", source))] #[snafu(display("Invalid response header, source: {}", source))]
InvalidResponseHeader { InvalidResponseHeader {
#[snafu(backtrace)] location: Location,
source: common_meta::error::Error, source: common_meta::error::Error,
}, },
#[snafu(display("Failed to convert Metasrv request, source: {}", source))] #[snafu(display("Failed to convert Metasrv request, source: {}", source))]
ConvertMetaRequest { ConvertMetaRequest {
#[snafu(backtrace)] location: Location,
source: common_meta::error::Error, source: common_meta::error::Error,
}, },
#[snafu(display("Failed to convert Metasrv response, source: {}", source))] #[snafu(display("Failed to convert Metasrv response, source: {}", source))]
ConvertMetaResponse { ConvertMetaResponse {
#[snafu(backtrace)] location: Location,
source: common_meta::error::Error, source: common_meta::error::Error,
}, },
} }
@@ -86,9 +86,9 @@ impl ErrorExt for Error {
| Error::CreateHeartbeatStream { .. } | Error::CreateHeartbeatStream { .. }
| Error::CreateChannel { .. } => StatusCode::Internal, | Error::CreateChannel { .. } => StatusCode::Internal,
Error::InvalidResponseHeader { source } Error::InvalidResponseHeader { source, .. }
| Error::ConvertMetaRequest { source } | Error::ConvertMetaRequest { source, .. }
| Error::ConvertMetaResponse { source } => source.status_code(), | Error::ConvertMetaResponse { source, .. } => source.status_code(),
} }
} }
} }

View File

@@ -26,7 +26,7 @@ pub enum Error {
#[snafu(display("Failed to shutdown {} server, source: {}", server, source))] #[snafu(display("Failed to shutdown {} server, source: {}", server, source))]
ShutdownServer { ShutdownServer {
#[snafu(backtrace)] location: Location,
source: servers::error::Error, source: servers::error::Error,
server: String, server: String,
}, },
@@ -60,7 +60,7 @@ pub enum Error {
}, },
#[snafu(display("Failed to start http server, source: {}", source))] #[snafu(display("Failed to start http server, source: {}", source))]
StartHttp { StartHttp {
#[snafu(backtrace)] location: Location,
source: servers::error::Error, source: servers::error::Error,
}, },
#[snafu(display("Failed to parse address {}, source: {}", addr, source))] #[snafu(display("Failed to parse address {}, source: {}", addr, source))]
@@ -130,7 +130,7 @@ pub enum Error {
#[snafu(display("Cannot parse catalog value, source: {}", source))] #[snafu(display("Cannot parse catalog value, source: {}", source))]
InvalidCatalogValue { InvalidCatalogValue {
#[snafu(backtrace)] location: Location,
source: common_catalog::error::Error, source: common_catalog::error::Error,
}, },
@@ -190,7 +190,7 @@ pub enum Error {
#[snafu(display("Failed to create gRPC channel, source: {}", source))] #[snafu(display("Failed to create gRPC channel, source: {}", source))]
CreateChannel { CreateChannel {
#[snafu(backtrace)] location: Location,
source: common_grpc::error::Error, source: common_grpc::error::Error,
}, },
@@ -273,7 +273,7 @@ pub enum Error {
#[snafu(display("Failed to recover procedure, source: {source}"))] #[snafu(display("Failed to recover procedure, source: {source}"))]
RecoverProcedure { RecoverProcedure {
#[snafu(backtrace)] location: Location,
source: common_procedure::Error, source: common_procedure::Error,
}, },
@@ -321,7 +321,7 @@ pub enum Error {
))] ))]
RegisterProcedureLoader { RegisterProcedureLoader {
type_name: String, type_name: String,
#[snafu(backtrace)] location: Location,
source: common_procedure::error::Error, source: common_procedure::error::Error,
}, },
@@ -350,7 +350,7 @@ pub enum Error {
#[snafu(display("Failed to convert table route, source: {}", source))] #[snafu(display("Failed to convert table route, source: {}", source))]
TableRouteConversion { TableRouteConversion {
#[snafu(backtrace)] location: Location,
source: common_meta::error::Error, source: common_meta::error::Error,
}, },
@@ -434,15 +434,15 @@ impl ErrorExt for Error {
| Error::Unexpected { .. } => StatusCode::Unexpected, | Error::Unexpected { .. } => StatusCode::Unexpected,
Error::TableNotFound { .. } => StatusCode::TableNotFound, Error::TableNotFound { .. } => StatusCode::TableNotFound,
Error::InvalidCatalogValue { source, .. } => source.status_code(), Error::InvalidCatalogValue { source, .. } => source.status_code(),
Error::RecoverProcedure { source } => source.status_code(), Error::RecoverProcedure { source, .. } => source.status_code(),
Error::ShutdownServer { source, .. } | Error::StartHttp { source } => { Error::ShutdownServer { source, .. } | Error::StartHttp { source, .. } => {
source.status_code() source.status_code()
} }
Error::RegionFailoverCandidatesNotFound { .. } => StatusCode::RuntimeResourcesExhausted, Error::RegionFailoverCandidatesNotFound { .. } => StatusCode::RuntimeResourcesExhausted,
Error::RegisterProcedureLoader { source, .. } => source.status_code(), Error::RegisterProcedureLoader { source, .. } => source.status_code(),
Error::TableRouteConversion { source } => source.status_code(), Error::TableRouteConversion { source, .. } => source.status_code(),
Error::Other { source, .. } => source.status_code(), Error::Other { source, .. } => source.status_code(),
} }
} }

View File

@@ -74,12 +74,6 @@ impl HeartbeatHandler for RegionFailureHandler {
let Some(stat) = acc.stat.as_ref() else { return Ok(()) }; let Some(stat) = acc.stat.as_ref() else { return Ok(()) };
// TODO(LFC): Filter out the stalled heartbeats:
// After the region failover is done, the distribution of region is changed.
// We can compare the heartbeat info here with the global region placement metadata,
// and remove the incorrect region ident keys in failure detect runner
// (by sending a control message).
let heartbeat = DatanodeHeartbeat { let heartbeat = DatanodeHeartbeat {
region_idents: stat region_idents: stat
.region_stats .region_stats

View File

@@ -146,25 +146,21 @@ impl MetaSrv {
         common_runtime::spawn_bg(async move {
             loop {
                 match rx.recv().await {
-                    Ok(msg) => {
-                        match msg {
-                            LeaderChangeMessage::Elected(_) => {
-                                if let Err(e) = procedure_manager.recover().await {
-                                    error!("Failed to recover procedures, error: {e}");
-                                }
-                            }
-                            LeaderChangeMessage::StepDown(leader) => {
-                                // TODO(LFC): TBC
-                                error!("Leader :{:?} step down", leader);
-                            }
-                        }
-                    }
+                    Ok(msg) => match msg {
+                        LeaderChangeMessage::Elected(_) => {
+                            if let Err(e) = procedure_manager.recover().await {
+                                error!("Failed to recover procedures, error: {e}");
+                            }
+                        }
+                        LeaderChangeMessage::StepDown(leader) => {
+                            error!("Leader :{:?} step down", leader);
+                        }
+                    },
                     Err(RecvError::Closed) => {
                         error!("Not expected, is leader election loop still running?");
                         break;
                     }
                     Err(RecvError::Lagged(_)) => {
-                        // TODO(LFC): TBC
                         break;
                     }
                 }

View File

@@ -43,16 +43,6 @@ impl UpdateRegionMetadata {
Self { candidate } Self { candidate }
} }
// TODO(LFC): Update the two table metadata values in a batch atomically.
//
// Though the updating of the two metadata values is guarded by a distributed lock,
// it does not robust enough. For example, the lock lease could be expired in the middle of
// one's updating, letting others to start updating concurrently. For now, we set the lease of
// the distributed lock to 10 seconds, which is long enough here to get the job done.
//
// Maybe we should introduce "version" companion values to these two metadata values, and
// use ETCD transaction request to update them?
/// Updates the metadata of the table. Specifically, the [TableGlobalValue] and [TableRouteValue]. /// Updates the metadata of the table. Specifically, the [TableGlobalValue] and [TableRouteValue].
async fn update_metadata( async fn update_metadata(
&self, &self,

View File

@@ -16,6 +16,7 @@ mod health;
mod heartbeat; mod heartbeat;
mod leader; mod leader;
mod meta; mod meta;
mod route;
use std::collections::HashMap; use std::collections::HashMap;
use std::convert::Infallible; use std::convert::Infallible;
@@ -73,6 +74,13 @@ pub fn make_admin_service(meta_srv: MetaSrv) -> Admin {
}, },
); );
let router = router.route(
"/route",
route::RouteHandler {
kv_store: meta_srv.kv_store(),
},
);
let router = Router::nest("/admin", router); let router = Router::nest("/admin", router);
Admin::new(router) Admin::new(router)

View File

@@ -0,0 +1,86 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use api::v1::meta::{RangeRequest, RangeResponse, TableRouteValue};
use common_meta::key::TABLE_ROUTE_PREFIX;
use prost::Message;
use snafu::{OptionExt, ResultExt};
use tonic::codegen::http;
use super::HttpHandler;
use crate::error::Result;
use crate::service::store::kv::KvStoreRef;
use crate::{error, util};
pub struct RouteHandler {
pub kv_store: KvStoreRef,
}
#[async_trait::async_trait]
impl HttpHandler for RouteHandler {
async fn handle(
&self,
_path: &str,
params: &HashMap<String, String>,
) -> Result<http::Response<String>> {
let full_table_name = params
.get("full_table_name")
.map(|full_table_name| full_table_name.replace('.', "-"))
.context(error::MissingRequiredParameterSnafu {
param: "full_table_name",
})?;
let route_key = format!("{}-{}", TABLE_ROUTE_PREFIX, full_table_name).into_bytes();
let range_end = util::get_prefix_end_key(&route_key);
let req = RangeRequest {
key: route_key,
range_end,
keys_only: false,
..Default::default()
};
let resp = self.kv_store.range(req).await?;
let show = pretty_fmt(resp)?;
http::Response::builder()
.status(http::StatusCode::OK)
.body(show)
.context(error::InvalidHttpBodySnafu)
}
}
fn pretty_fmt(response: RangeResponse) -> Result<String> {
let mut show = "".to_string();
for kv in response.kvs.into_iter() {
let route_key = String::from_utf8(kv.key).unwrap();
let route_val =
TableRouteValue::decode(&kv.value[..]).context(error::DecodeTableRouteSnafu)?;
show.push_str("route_key:\n");
show.push_str(&route_key);
show.push('\n');
show.push_str("route_value:\n");
show.push_str(&format!("{:#?}", route_val));
show.push('\n');
}
Ok(show)
}
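
The new handler is registered under `/route` in the admin router shown earlier, so once a metasrv is running the table route can be inspected over plain HTTP. A rough sketch of a client call; the address, port, and `/admin` prefix are assumptions based on that registration, not verified values:

```rust
// Hypothetical client-side check of the new admin route endpoint.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // `full_table_name` is passed with dots; the handler rewrites them to '-'.
    let url =
        "http://127.0.0.1:3002/admin/route?full_table_name=greptime.public.my_table";
    let body = reqwest::get(url).await?.text().await?;
    println!("{body}"); // pretty-printed route_key / route_value pairs
    Ok(())
}
```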

View File

@@ -20,7 +20,7 @@ use api::v1::meta::{
heartbeat_server, AskLeaderRequest, AskLeaderResponse, HeartbeatRequest, HeartbeatResponse, heartbeat_server, AskLeaderRequest, AskLeaderResponse, HeartbeatRequest, HeartbeatResponse,
Peer, RequestHeader, ResponseHeader, Role, Peer, RequestHeader, ResponseHeader, Role,
}; };
use common_telemetry::{error, info, warn}; use common_telemetry::{debug, error, info, warn};
use futures::StreamExt; use futures::StreamExt;
use once_cell::sync::OnceCell; use once_cell::sync::OnceCell;
use tokio::sync::mpsc; use tokio::sync::mpsc;
@@ -59,6 +59,7 @@ impl heartbeat_server::Heartbeat for MetaSrv {
break; break;
} }
}; };
debug!("Receiving heartbeat request: {:?}", req);
if pusher_key.is_none() { if pusher_key.is_none() {
let node_id = get_node_id(header); let node_id = get_node_id(header);
@@ -76,6 +77,7 @@ impl heartbeat_server::Heartbeat for MetaSrv {
is_not_leader = res.as_ref().map_or(false, |r| r.is_not_leader()); is_not_leader = res.as_ref().map_or(false, |r| r.is_not_leader());
debug!("Sending heartbeat response: {:?}", res);
tx.send(res).await.expect("working rx"); tx.send(res).await.expect("working rx");
} }
Err(err) => { Err(err) => {

View File

@@ -21,6 +21,7 @@ common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" } common-recordbatch = { path = "../common/recordbatch" }
common-datasource = { path = "../common/datasource" } common-datasource = { path = "../common/datasource" }
common-telemetry = { path = "../common/telemetry" } common-telemetry = { path = "../common/telemetry" }
common-test-util = { path = "../common/test-util", optional = true }
common-time = { path = "../common/time" } common-time = { path = "../common/time" }
dashmap = "5.4" dashmap = "5.4"
datafusion.workspace = true datafusion.workspace = true
@@ -29,6 +30,7 @@ datatypes = { path = "../datatypes" }
futures.workspace = true futures.workspace = true
key-lock = "0.1" key-lock = "0.1"
log-store = { path = "../log-store" } log-store = { path = "../log-store" }
metrics.workspace = true
object-store = { path = "../object-store" } object-store = { path = "../object-store" }
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0" serde_json = "1.0"
@@ -36,7 +38,6 @@ snafu.workspace = true
storage = { path = "../storage" } storage = { path = "../storage" }
store-api = { path = "../store-api" } store-api = { path = "../store-api" }
table = { path = "../table" } table = { path = "../table" }
common-test-util = { path = "../common/test-util", optional = true }
tokio.workspace = true tokio.workspace = true
[dev-dependencies] [dev-dependencies]

View File

@@ -295,7 +295,7 @@ fn build_row_key_desc(
let column_schemas = &table_schema.column_schemas(); let column_schemas = &table_schema.column_schemas();
//TODO(boyan): enable version column by table option? //TODO(dennis): enable version column by table option?
let mut builder = RowKeyDescriptorBuilder::new(ts_column); let mut builder = RowKeyDescriptorBuilder::new(ts_column);
for index in primary_key_indices { for index in primary_key_indices {
@@ -452,7 +452,6 @@ impl<S: StorageEngine> MitoEngineInner<S> {
.write_buffer_size .write_buffer_size
.map(|s| s.0 as usize), .map(|s| s.0 as usize),
ttl: table_info.meta.options.ttl, ttl: table_info.meta.options.ttl,
compaction_time_window: table_info.meta.options.compaction_time_window,
}; };
debug!( debug!(
@@ -532,7 +531,6 @@ impl<S: StorageEngine> MitoEngineInner<S> {
.write_buffer_size .write_buffer_size
.map(|s| s.0 as usize), .map(|s| s.0 as usize),
ttl: table_info.meta.options.ttl, ttl: table_info.meta.options.ttl,
compaction_time_window: table_info.meta.options.compaction_time_window,
}; };
// TODO(weny): Returns an error earlier if the target region does not exist in the meta. // TODO(weny): Returns an error earlier if the target region does not exist in the meta.

View File

@@ -228,18 +228,15 @@ impl<S: StorageEngine> TableCreator<S> {
let table_options = &self.data.request.table_options; let table_options = &self.data.request.table_options;
let write_buffer_size = table_options.write_buffer_size.map(|size| size.0 as usize); let write_buffer_size = table_options.write_buffer_size.map(|size| size.0 as usize);
let ttl = table_options.ttl; let ttl = table_options.ttl;
let compaction_time_window = table_options.compaction_time_window;
let open_opts = OpenOptions { let open_opts = OpenOptions {
parent_dir: table_dir.to_string(), parent_dir: table_dir.to_string(),
write_buffer_size, write_buffer_size,
ttl, ttl,
compaction_time_window,
}; };
let create_opts = CreateOptions { let create_opts = CreateOptions {
parent_dir: table_dir.to_string(), parent_dir: table_dir.to_string(),
write_buffer_size, write_buffer_size,
ttl, ttl,
compaction_time_window,
}; };
let primary_key_indices = &self.data.request.primary_key_indices; let primary_key_indices = &self.data.request.primary_key_indices;
@@ -285,7 +282,6 @@ impl<S: StorageEngine> TableCreator<S> {
.name(region_name.clone()) .name(region_name.clone())
.row_key(row_key.clone()) .row_key(row_key.clone())
.default_cf(default_cf.clone()) .default_cf(default_cf.clone())
.compaction_time_window(compaction_time_window)
.build() .build()
.context(BuildRegionDescriptorSnafu { .context(BuildRegionDescriptorSnafu {
table_name: &self.data.request.table_name, table_name: &self.data.request.table_name,

View File

@@ -107,7 +107,7 @@ pub enum Error {
source, source,
))] ))]
UpdateTableManifest { UpdateTableManifest {
#[snafu(backtrace)] location: Location,
source: storage::error::Error, source: storage::error::Error,
table_name: String, table_name: String,
}, },
@@ -118,7 +118,7 @@ pub enum Error {
source, source,
))] ))]
ScanTableManifest { ScanTableManifest {
#[snafu(backtrace)] location: Location,
source: storage::error::Error, source: storage::error::Error,
table_name: String, table_name: String,
}, },
@@ -149,7 +149,7 @@ pub enum Error {
source source
))] ))]
ConvertRaw { ConvertRaw {
#[snafu(backtrace)] location: Location,
source: table::metadata::ConvertError, source: table::metadata::ConvertError,
}, },

View File

@@ -25,3 +25,7 @@ pub const MITO_CREATE_TABLE_UPDATE_MANIFEST_ELAPSED: &str =
pub const MITO_OPEN_TABLE_ELAPSED: &str = "datanode.mito.open_table"; pub const MITO_OPEN_TABLE_ELAPSED: &str = "datanode.mito.open_table";
/// Elapsed time of altering tables /// Elapsed time of altering tables
pub const MITO_ALTER_TABLE_ELAPSED: &str = "datanode.mito.alter_table"; pub const MITO_ALTER_TABLE_ELAPSED: &str = "datanode.mito.alter_table";
/// Elapsed time of insertion
pub const MITO_INSERT_ELAPSED: &str = "datanode.mito.insert";
/// Insert batch size.
pub const MITO_INSERT_BATCH_SIZE: &str = "datanode.mito.insert_batch_size";
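
A small sketch of how these two constants are consumed in the table insert path further down: the elapsed time is reported through `common_telemetry::timer!` when its guard drops, and the batch size goes through `metrics::histogram!`. The wrapper function below is hypothetical; only the metric names and the `histogram!` calls mirror the diff:

```rust
use std::time::Instant;

// Stand-ins for the constants declared above.
const MITO_INSERT_ELAPSED: &str = "datanode.mito.insert";
const MITO_INSERT_BATCH_SIZE: &str = "datanode.mito.insert_batch_size";

fn record_insert_metrics(rows_num: usize, started_at: Instant) {
    // The real code uses a timer guard; recording the elapsed seconds into a
    // histogram is the equivalent explicit form.
    metrics::histogram!(MITO_INSERT_ELAPSED, started_at.elapsed().as_secs_f64());
    metrics::histogram!(MITO_INSERT_BATCH_SIZE, rows_num as f64);
}
```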

View File

@@ -29,6 +29,7 @@ use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatch, RecordBatchStreamAdaptor, SendableRecordBatchStream}; use common_recordbatch::{RecordBatch, RecordBatchStreamAdaptor, SendableRecordBatchStream};
use common_telemetry::{info, logging}; use common_telemetry::{info, logging};
use datatypes::schema::Schema; use datatypes::schema::Schema;
use metrics::histogram;
use object_store::ObjectStore; use object_store::ObjectStore;
use snafu::{ensure, OptionExt, ResultExt}; use snafu::{ensure, OptionExt, ResultExt};
use store_api::manifest::{self, Manifest, ManifestVersion, MetaActionIterator}; use store_api::manifest::{self, Manifest, ManifestVersion, MetaActionIterator};
@@ -57,6 +58,8 @@ use crate::error::{
}; };
use crate::manifest::action::*; use crate::manifest::action::*;
use crate::manifest::TableManifest; use crate::manifest::TableManifest;
use crate::metrics::{MITO_INSERT_BATCH_SIZE, MITO_INSERT_ELAPSED};
#[inline] #[inline]
fn table_manifest_dir(table_dir: &str) -> String { fn table_manifest_dir(table_dir: &str) -> String {
assert!(table_dir.ends_with('/')); assert!(table_dir.ends_with('/'));
@@ -83,6 +86,8 @@ impl<R: Region> Table for MitoTable<R> {
} }
async fn insert(&self, request: InsertRequest) -> TableResult<usize> { async fn insert(&self, request: InsertRequest) -> TableResult<usize> {
let _timer = common_telemetry::timer!(MITO_INSERT_ELAPSED);
if request.columns_values.is_empty() { if request.columns_values.is_empty() {
return Ok(0); return Ok(0);
} }
@@ -105,6 +110,8 @@ impl<R: Region> Table for MitoTable<R> {
// columns_values is not empty, it's safe to unwrap // columns_values is not empty, it's safe to unwrap
let rows_num = columns_values.values().next().unwrap().len(); let rows_num = columns_values.values().next().unwrap().len();
histogram!(MITO_INSERT_BATCH_SIZE, rows_num as f64);
logging::trace!( logging::trace!(
"Insert into table {} region {} with data: {:?}", "Insert into table {} region {} with data: {:?}",
self.table_info().name, self.table_info().name,

View File

@@ -14,9 +14,9 @@ metrics.workspace = true
opendal = { version = "0.36", features = ["layers-tracing", "layers-metrics"] } opendal = { version = "0.36", features = ["layers-tracing", "layers-metrics"] }
pin-project = "1.0" pin-project = "1.0"
tokio.workspace = true tokio.workspace = true
uuid.workspace = true
[dev-dependencies] [dev-dependencies]
anyhow = "1.0" anyhow = "1.0"
common-telemetry = { path = "../common/telemetry" } common-telemetry = { path = "../common/telemetry" }
common-test-util = { path = "../common/test-util" } common-test-util = { path = "../common/test-util" }
uuid.workspace = true

View File

@@ -12,8 +12,11 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
use std::env;
use crate::{ObjectStore, Result}; use crate::{ObjectStore, Result};
/// Temp folder for object store test
pub struct TempFolder { pub struct TempFolder {
store: ObjectStore, store: ObjectStore,
// The path under root. // The path under root.
@@ -28,7 +31,34 @@ impl TempFolder {
} }
} }
pub async fn remove_all(&mut self) -> Result<()> { pub async fn remove_all(&self) -> Result<()> {
self.store.remove_all(&self.path).await self.store.remove_all(&self.path).await
} }
} }
/// Test s3 config from environment variables
#[derive(Debug)]
pub struct TestS3Config {
pub root: String,
pub access_key_id: String,
pub secret_access_key: String,
pub bucket: String,
pub region: Option<String>,
}
/// Returns s3 test config, return None if not found.
pub fn s3_test_config() -> Option<TestS3Config> {
if let Ok(b) = env::var("GT_S3_BUCKET") {
if !b.is_empty() {
return Some(TestS3Config {
root: uuid::Uuid::new_v4().to_string(),
access_key_id: env::var("GT_S3_ACCESS_KEY_ID").ok()?,
secret_access_key: env::var("GT_S3_ACCESS_KEY").ok()?,
bucket: env::var("GT_S3_BUCKET").ok()?,
region: Some(env::var("GT_S3_REGION").ok()?),
});
}
}
None
}
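
A hedged sketch of how a backend test might consume `s3_test_config()`, assuming it sits in the same module as the helper above: skip quietly when `GT_S3_BUCKET` is not set, otherwise use the returned config to build the store under test.

```rust
#[test]
fn s3_test_config_is_optional() {
    let Some(config) = s3_test_config() else {
        // GT_S3_BUCKET is unset or empty; nothing to test against, as in the
        // environment-gated backend tests below.
        return;
    };
    assert!(!config.bucket.is_empty());
    assert!(!config.root.is_empty()); // a fresh UUID per run, used as the test root
}
```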

View File

@@ -120,7 +120,7 @@ async fn test_s3_backend() -> Result<()> {
let store = ObjectStore::new(builder).unwrap().finish(); let store = ObjectStore::new(builder).unwrap().finish();
let mut guard = TempFolder::new(&store, "/"); let guard = TempFolder::new(&store, "/");
test_object_crud(&store).await?; test_object_crud(&store).await?;
test_object_list(&store).await?; test_object_list(&store).await?;
guard.remove_all().await?; guard.remove_all().await?;
@@ -148,7 +148,7 @@ async fn test_oss_backend() -> Result<()> {
let store = ObjectStore::new(builder).unwrap().finish(); let store = ObjectStore::new(builder).unwrap().finish();
let mut guard = TempFolder::new(&store, "/"); let guard = TempFolder::new(&store, "/");
test_object_crud(&store).await?; test_object_crud(&store).await?;
test_object_list(&store).await?; test_object_list(&store).await?;
guard.remove_all().await?; guard.remove_all().await?;
@@ -176,7 +176,7 @@ async fn test_azblob_backend() -> Result<()> {
let store = ObjectStore::new(builder).unwrap().finish(); let store = ObjectStore::new(builder).unwrap().finish();
let mut guard = TempFolder::new(&store, "/"); let guard = TempFolder::new(&store, "/");
test_object_crud(&store).await?; test_object_crud(&store).await?;
test_object_list(&store).await?; test_object_list(&store).await?;
guard.remove_all().await?; guard.remove_all().await?;

View File

@@ -28,7 +28,7 @@ pub enum Error {
#[snafu(display("Failed to request Meta, source: {}", source))] #[snafu(display("Failed to request Meta, source: {}", source))]
RequestMeta { RequestMeta {
#[snafu(backtrace)] location: Location,
source: meta_client::error::Error, source: meta_client::error::Error,
}, },
@@ -75,7 +75,7 @@ pub enum Error {
))] ))]
CreateDefaultToRead { CreateDefaultToRead {
column: String, column: String,
#[snafu(backtrace)] location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
@@ -128,7 +128,7 @@ pub enum Error {
))] ))]
ConvertScalarValue { ConvertScalarValue {
value: ScalarValue, value: ScalarValue,
#[snafu(backtrace)] location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },

View File

@@ -34,7 +34,7 @@ pub enum InnerError {
#[snafu(display("Fail to convert arrow schema, source: {}", source))] #[snafu(display("Fail to convert arrow schema, source: {}", source))]
ConvertSchema { ConvertSchema {
#[snafu(backtrace)] location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
@@ -43,13 +43,13 @@ pub enum InnerError {
source source
))] ))]
ConvertDfRecordBatchStream { ConvertDfRecordBatchStream {
#[snafu(backtrace)] location: Location,
source: common_recordbatch::error::Error, source: common_recordbatch::error::Error,
}, },
#[snafu(display("Failed to execute physical plan, source: {}", source))] #[snafu(display("Failed to execute physical plan, source: {}", source))]
ExecutePhysicalPlan { ExecutePhysicalPlan {
#[snafu(backtrace)] location: Location,
source: common_query::error::Error, source: common_query::error::Error,
}, },
} }
@@ -62,8 +62,8 @@ impl ErrorExt for InnerError {
// TODO(yingwen): Further categorize datafusion error. // TODO(yingwen): Further categorize datafusion error.
Datafusion { .. } => StatusCode::EngineExecuteQuery, Datafusion { .. } => StatusCode::EngineExecuteQuery,
PhysicalPlanDowncast { .. } | ConvertSchema { .. } => StatusCode::Unexpected, PhysicalPlanDowncast { .. } | ConvertSchema { .. } => StatusCode::Unexpected,
ConvertDfRecordBatchStream { source } => source.status_code(), ConvertDfRecordBatchStream { source, .. } => source.status_code(),
ExecutePhysicalPlan { source } => source.status_code(), ExecutePhysicalPlan { source, .. } => source.status_code(),
} }
} }

View File

@@ -27,6 +27,10 @@ use datafusion::catalog::catalog::MemoryCatalogList;
use datafusion::error::Result as DfResult; use datafusion::error::Result as DfResult;
use datafusion::execution::context::{QueryPlanner, SessionConfig, SessionState}; use datafusion::execution::context::{QueryPlanner, SessionConfig, SessionState};
use datafusion::execution::runtime_env::RuntimeEnv; use datafusion::execution::runtime_env::RuntimeEnv;
use datafusion::physical_optimizer::dist_enforcement::EnforceDistribution;
use datafusion::physical_optimizer::repartition::Repartition;
use datafusion::physical_optimizer::sort_enforcement::EnforceSorting;
use datafusion::physical_optimizer::PhysicalOptimizerRule;
use datafusion::physical_plan::planner::{DefaultPhysicalPlanner, ExtensionPlanner}; use datafusion::physical_plan::planner::{DefaultPhysicalPlanner, ExtensionPlanner};
use datafusion::physical_plan::{ExecutionPlan, PhysicalPlanner}; use datafusion::physical_plan::{ExecutionPlan, PhysicalPlanner};
use datafusion_expr::LogicalPlan as DfLogicalPlan; use datafusion_expr::LogicalPlan as DfLogicalPlan;
@@ -79,6 +83,22 @@ impl QueryEngineState {
let mut optimizer = Optimizer::new(); let mut optimizer = Optimizer::new();
optimizer.rules.push(Arc::new(OrderHintRule)); optimizer.rules.push(Arc::new(OrderHintRule));
let mut physical_optimizers = {
let state = SessionState::with_config_rt(session_config.clone(), runtime_env.clone());
state.physical_optimizers().to_vec()
};
// run the repartition and sort enforcement rules first.
// And `EnforceSorting` is required to run after `EnforceDistribution`.
Self::remove_physical_optimize_rule(&mut physical_optimizers, EnforceSorting {}.name());
Self::remove_physical_optimize_rule(
&mut physical_optimizers,
EnforceDistribution {}.name(),
);
Self::remove_physical_optimize_rule(&mut physical_optimizers, Repartition {}.name());
physical_optimizers.insert(0, Arc::new(EnforceSorting {}));
physical_optimizers.insert(0, Arc::new(EnforceDistribution {}));
physical_optimizers.insert(0, Arc::new(Repartition {}));
let session_state = SessionState::with_config_rt_and_catalog_list( let session_state = SessionState::with_config_rt_and_catalog_list(
session_config, session_config,
runtime_env, runtime_env,
@@ -90,7 +110,8 @@ impl QueryEngineState {
partition_manager, partition_manager,
datanode_clients, datanode_clients,
))) )))
.with_optimizer_rules(optimizer.rules); .with_optimizer_rules(optimizer.rules)
.with_physical_optimizer_rules(physical_optimizers);
let df_context = SessionContext::with_state(session_state); let df_context = SessionContext::with_state(session_state);
@@ -102,6 +123,22 @@ impl QueryEngineState {
} }
} }
fn remove_physical_optimize_rule(
rules: &mut Vec<Arc<dyn PhysicalOptimizerRule + Send + Sync>>,
name: &str,
) {
let mut index_to_move = None;
for (i, rule) in rules.iter().enumerate() {
if rule.name() == name {
index_to_move = Some(i);
break;
}
}
if let Some(index) = index_to_move {
rules.remove(index);
}
}
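
`remove_physical_optimize_rule` walks the rule list and drops the first entry with a matching name. A dependency-free illustration of the same idea using `Iterator::position`; the `NamedRule` trait is a stand-in for DataFusion's `PhysicalOptimizerRule`, not the project's code:

```rust
use std::sync::Arc;

trait NamedRule {
    fn name(&self) -> &str;
}

fn remove_rule_by_name(rules: &mut Vec<Arc<dyn NamedRule + Send + Sync>>, name: &str) {
    // Only the first match is removed, preserving the order of the remaining rules,
    // which matters because the required rules are re-inserted at the front in a
    // fixed order afterwards.
    if let Some(index) = rules.iter().position(|rule| rule.name() == name) {
        rules.remove(index);
    }
}
```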
/// Register a udf function /// Register a udf function
// TODO(dennis): manage UDFs by ourself. // TODO(dennis): manage UDFs by ourself.
pub fn register_udf(&self, udf: ScalarUdf) { pub fn register_udf(&self, udf: ScalarUdf) {
@@ -116,13 +153,25 @@ impl QueryEngineState {
             .cloned()
     }
 
+    /// Register an aggregate function.
+    ///
+    /// # Panics
+    /// Will panic if the function with same name is already registered.
+    ///
+    /// Panicking consideration: currently the aggregated functions are all statically registered,
+    /// user cannot define their own aggregate functions on the fly. So we can panic here. If that
+    /// invariant is broken in the future, we should return an error instead of panicking.
     pub fn register_aggregate_function(&self, func: AggregateFunctionMetaRef) {
-        // TODO(LFC): Return some error if there exists an aggregate function with the same name.
-        // Simply overwrite the old value for now.
-        self.aggregate_functions
+        let name = func.name();
+        let x = self
+            .aggregate_functions
             .write()
             .unwrap()
-            .insert(func.name(), func);
+            .insert(name.clone(), func);
+        assert!(
+            x.is_none(),
+            "Already registered aggregate function '{name}'"
+        );
     }
 
     #[inline]

View File

@@ -96,7 +96,6 @@ pub async fn show_databases(
stmt: ShowDatabases, stmt: ShowDatabases,
catalog_manager: CatalogManagerRef, catalog_manager: CatalogManagerRef,
) -> Result<Output> { ) -> Result<Output> {
// TODO(LFC): supports WHERE
ensure!( ensure!(
matches!(stmt.kind, ShowKind::All | ShowKind::Like(_)), matches!(stmt.kind, ShowKind::All | ShowKind::Like(_)),
error::UnsupportedExprSnafu { error::UnsupportedExprSnafu {
@@ -136,7 +135,6 @@ pub async fn show_tables(
catalog_manager: CatalogManagerRef, catalog_manager: CatalogManagerRef,
query_ctx: QueryContextRef, query_ctx: QueryContextRef,
) -> Result<Output> { ) -> Result<Output> {
// TODO(LFC): supports WHERE
ensure!( ensure!(
matches!(stmt.kind, ShowKind::All | ShowKind::Like(_)), matches!(stmt.kind, ShowKind::All | ShowKind::Like(_)),
error::UnsupportedExprSnafu { error::UnsupportedExprSnafu {

View File

@@ -71,10 +71,6 @@ fn create_sql_options(table_meta: &TableMeta) -> Vec<SqlOption> {
)); ));
} }
if let Some(w) = table_opts.compaction_time_window {
options.push(sql_option("compaction_time_window", number_value(w)));
}
for (k, v) in table_opts for (k, v) in table_opts
.extra_options .extra_options
.iter() .iter()

View File

@@ -23,7 +23,7 @@ use snafu::Location;
pub enum Error { pub enum Error {
#[snafu(display("Failed to find scripts table, source: {}", source))] #[snafu(display("Failed to find scripts table, source: {}", source))]
FindScriptsTable { FindScriptsTable {
#[snafu(backtrace)] location: Location,
source: catalog::error::Error, source: catalog::error::Error,
}, },
@@ -32,7 +32,7 @@ pub enum Error {
#[snafu(display("Failed to register scripts table, source: {}", source))] #[snafu(display("Failed to register scripts table, source: {}", source))]
RegisterScriptsTable { RegisterScriptsTable {
#[snafu(backtrace)] location: Location,
source: catalog::error::Error, source: catalog::error::Error,
}, },
@@ -46,21 +46,21 @@ pub enum Error {
))] ))]
InsertScript { InsertScript {
name: String, name: String,
#[snafu(backtrace)] location: Location,
source: table::error::Error, source: table::error::Error,
}, },
#[snafu(display("Failed to compile python script, name: {}, source: {}", name, source))] #[snafu(display("Failed to compile python script, name: {}, source: {}", name, source))]
CompilePython { CompilePython {
name: String, name: String,
#[snafu(backtrace)] location: Location,
source: crate::python::error::Error, source: crate::python::error::Error,
}, },
#[snafu(display("Failed to execute python script {}, source: {}", name, source))] #[snafu(display("Failed to execute python script {}, source: {}", name, source))]
ExecutePython { ExecutePython {
name: String, name: String,
#[snafu(backtrace)] location: Location,
source: crate::python::error::Error, source: crate::python::error::Error,
}, },
@@ -70,13 +70,13 @@ pub enum Error {
#[snafu(display("Failed to find script by name: {}", name))] #[snafu(display("Failed to find script by name: {}", name))]
FindScript { FindScript {
name: String, name: String,
#[snafu(backtrace)] location: Location,
source: query::error::Error, source: query::error::Error,
}, },
#[snafu(display("Failed to collect record batch, source: {}", source))] #[snafu(display("Failed to collect record batch, source: {}", source))]
CollectRecords { CollectRecords {
#[snafu(backtrace)] location: Location,
source: common_recordbatch::error::Error, source: common_recordbatch::error::Error,
}, },
@@ -92,11 +92,13 @@ impl ErrorExt for Error {
match self { match self {
FindColumnInScriptsTable { .. } | CastType { .. } => StatusCode::Unexpected, FindColumnInScriptsTable { .. } | CastType { .. } => StatusCode::Unexpected,
ScriptsTableNotFound { .. } => StatusCode::TableNotFound, ScriptsTableNotFound { .. } => StatusCode::TableNotFound,
RegisterScriptsTable { source } | FindScriptsTable { source } => source.status_code(), RegisterScriptsTable { source, .. } | FindScriptsTable { source, .. } => {
source.status_code()
}
InsertScript { source, .. } => source.status_code(), InsertScript { source, .. } => source.status_code(),
CompilePython { source, .. } | ExecutePython { source, .. } => source.status_code(), CompilePython { source, .. } | ExecutePython { source, .. } => source.status_code(),
FindScript { source, .. } => source.status_code(), FindScript { source, .. } => source.status_code(),
CollectRecords { source } => source.status_code(), CollectRecords { source, .. } => source.status_code(),
ScriptNotFound { .. } => StatusCode::InvalidArguments, ScriptNotFound { .. } => StatusCode::InvalidArguments,
} }
} }

View File

@@ -40,7 +40,7 @@ use snafu::{ensure, ResultExt};
use sql::statements::statement::Statement; use sql::statements::statement::Statement;
use crate::engine::{CompileContext, EvalContext, Script, ScriptEngine}; use crate::engine::{CompileContext, EvalContext, Script, ScriptEngine};
use crate::python::error::{self, PyRuntimeSnafu, Result, TokioJoinSnafu}; use crate::python::error::{self, DatabaseQuerySnafu, PyRuntimeSnafu, Result, TokioJoinSnafu};
use crate::python::ffi_types::copr::{exec_parsed, parse, AnnotationInfo, CoprocessorRef}; use crate::python::ffi_types::copr::{exec_parsed, parse, AnnotationInfo, CoprocessorRef};
use crate::python::utils::spawn_blocking_script; use crate::python::utils::spawn_blocking_script;
const PY_ENGINE: &str = "python"; const PY_ENGINE: &str = "python";
@@ -290,8 +290,13 @@ impl Script for PyScript {
             .query_engine
             .planner()
             .plan(stmt, QueryContext::arc())
-            .await?;
-        let res = self.query_engine.execute(plan, QueryContext::arc()).await?;
+            .await
+            .context(DatabaseQuerySnafu)?;
+        let res = self
+            .query_engine
+            .execute(plan, QueryContext::arc())
+            .await
+            .context(DatabaseQuerySnafu)?;
         let copr = self.copr.clone();
         match res {
             Output::Stream(stream) => Ok(Output::Stream(Box::pin(CoprStream::try_new(
@@ -346,6 +351,7 @@ impl ScriptEngine for PyEngine {
}) })
} }
} }
#[cfg(test)] #[cfg(test)]
pub(crate) use tests::sample_script_engine; pub(crate) use tests::sample_script_engine;

View File

@@ -35,13 +35,13 @@ pub(crate) fn ret_other_error_with(reason: String) -> OtherSnafu<String> {
pub enum Error { pub enum Error {
#[snafu(display("Datatype error: {}", source))] #[snafu(display("Datatype error: {}", source))]
TypeCast { TypeCast {
#[snafu(backtrace)] location: SnafuLocation,
source: DataTypeError, source: DataTypeError,
}, },
#[snafu(display("Failed to query, source: {}", source))] #[snafu(display("Failed to query, source: {}", source))]
DatabaseQuery { DatabaseQuery {
#[snafu(backtrace)] location: SnafuLocation,
source: QueryError, source: QueryError,
}, },
@@ -105,25 +105,19 @@ pub enum Error {
#[snafu(display("Failed to retrieve record batches, source: {}", source))] #[snafu(display("Failed to retrieve record batches, source: {}", source))]
RecordBatch { RecordBatch {
#[snafu(backtrace)] location: SnafuLocation,
source: common_recordbatch::error::Error, source: common_recordbatch::error::Error,
}, },
#[snafu(display("Failed to create record batch, source: {}", source))] #[snafu(display("Failed to create record batch, source: {}", source))]
NewRecordBatch { NewRecordBatch {
#[snafu(backtrace)] location: SnafuLocation,
source: common_recordbatch::error::Error, source: common_recordbatch::error::Error,
}, },
#[snafu(display("Failed to create tokio task, source: {}", source))] #[snafu(display("Failed to create tokio task, source: {}", source))]
TokioJoin { source: tokio::task::JoinError }, TokioJoin { source: tokio::task::JoinError },
} }
impl From<QueryError> for Error {
fn from(source: QueryError) -> Self {
Self::DatabaseQuery { source }
}
}
impl ErrorExt for Error { impl ErrorExt for Error {
fn status_code(&self) -> StatusCode { fn status_code(&self) -> StatusCode {
match self { match self {
@@ -133,11 +127,11 @@ impl ErrorExt for Error {
| Error::TokioJoin { .. } | Error::TokioJoin { .. }
| Error::Other { .. } => StatusCode::Internal, | Error::Other { .. } => StatusCode::Internal,
Error::RecordBatch { source } | Error::NewRecordBatch { source } => { Error::RecordBatch { source, .. } | Error::NewRecordBatch { source, .. } => {
source.status_code() source.status_code()
} }
Error::DatabaseQuery { source } => source.status_code(), Error::DatabaseQuery { source, .. } => source.status_code(),
Error::TypeCast { source } => source.status_code(), Error::TypeCast { source, .. } => source.status_code(),
Error::PyParse { .. } Error::PyParse { .. }
| Error::PyCompile { .. } | Error::PyCompile { .. }
@@ -150,12 +144,6 @@ impl ErrorExt for Error {
self self
} }
} }
// impl from for those error so one can use question mark and implicitly cast into `CoprError`
impl From<DataTypeError> for Error {
fn from(e: DataTypeError) -> Self {
Self::TypeCast { source: e }
}
}
/// pretty print [`Error`] in given script, /// pretty print [`Error`] in given script,
/// basically print a arrow which point to where error occurs(if possible to get a location) /// basically print a arrow which point to where error occurs(if possible to get a location)
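
Dropping the `From<DataTypeError>` and `From<QueryError>` conversions means `?` no longer converts implicitly; call sites opt in with a snafu context selector instead (as in the `DatabaseQuerySnafu` hunk above), which also captures the new `location`. A minimal sketch with placeholder error types:

```rust
use snafu::{Location, ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum QueryError {
    #[snafu(display("query failed"))]
    Failed,
}

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to query, source: {}", source))]
    DatabaseQuery {
        location: Location,
        source: QueryError,
    },
}

fn run_query() -> Result<(), QueryError> {
    Err(QueryError::Failed)
}

// Before: `impl From<QueryError> for Error` let `?` convert silently.
// After: the conversion is explicit and records the call site.
fn handler() -> Result<(), Error> {
    run_query().context(DatabaseQuerySnafu)?;
    Ok(())
}
```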

View File

@@ -5,6 +5,7 @@ edition.workspace = true
license.workspace = true license.workspace = true
[features] [features]
pprof = ["dep:common-pprof"]
mem-prof = ["dep:common-mem-prof"] mem-prof = ["dep:common-mem-prof"]
dashboard = [] dashboard = []
@@ -25,6 +26,7 @@ common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" } common-grpc = { path = "../common/grpc" }
common-grpc-expr = { path = "../common/grpc-expr" } common-grpc-expr = { path = "../common/grpc-expr" }
common-mem-prof = { path = "../common/mem-prof", optional = true } common-mem-prof = { path = "../common/mem-prof", optional = true }
common-pprof = { path = "../common/pprof", optional = true }
common-query = { path = "../common/query" } common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" } common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" } common-runtime = { path = "../common/runtime" }
@@ -71,6 +73,7 @@ snap = "1"
sql = { path = "../sql" } sql = { path = "../sql" }
strum = { version = "0.24", features = ["derive"] } strum = { version = "0.24", features = ["derive"] }
table = { path = "../table" } table = { path = "../table" }
tikv-jemalloc-ctl = { version = "0.5", features = ["use_std"] }
tokio-rustls = "0.24" tokio-rustls = "0.24"
tokio-stream = { version = "0.1", features = ["net"] } tokio-stream = { version = "0.1", features = ["net"] }
tokio.workspace = true tokio.workspace = true

View File

@@ -111,7 +111,7 @@ pub enum Error {
#[snafu(display("Auth failed, source: {}", source))] #[snafu(display("Auth failed, source: {}", source))]
AuthBackend { AuthBackend {
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },

View File

@@ -49,7 +49,7 @@ pub enum Error {
#[snafu(display("Failed to collect recordbatch, source: {}", source))] #[snafu(display("Failed to collect recordbatch, source: {}", source))]
CollectRecordbatch { CollectRecordbatch {
#[snafu(backtrace)] location: Location,
source: common_recordbatch::error::Error, source: common_recordbatch::error::Error,
}, },
@@ -71,19 +71,19 @@ pub enum Error {
#[snafu(display("Failed to execute query: {}, source: {}", query, source))] #[snafu(display("Failed to execute query: {}, source: {}", query, source))]
ExecuteQuery { ExecuteQuery {
query: String, query: String,
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },
#[snafu(display("{source}"))] #[snafu(display("{source}"))]
ExecuteGrpcQuery { ExecuteGrpcQuery {
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },
#[snafu(display("Failed to check database validity, source: {}", source))] #[snafu(display("Failed to check database validity, source: {}", source))]
CheckDatabaseValidity { CheckDatabaseValidity {
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },
@@ -93,14 +93,14 @@ pub enum Error {
#[snafu(display("Failed to insert script with name: {}, source: {}", name, source))] #[snafu(display("Failed to insert script with name: {}, source: {}", name, source))]
InsertScript { InsertScript {
name: String, name: String,
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },
#[snafu(display("Failed to execute script by name: {}, source: {}", name, source))] #[snafu(display("Failed to execute script by name: {}, source: {}", name, source))]
ExecuteScript { ExecuteScript {
name: String, name: String,
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },
@@ -112,19 +112,19 @@ pub enum Error {
#[snafu(display("Failed to parse InfluxDB line protocol, source: {}", source))] #[snafu(display("Failed to parse InfluxDB line protocol, source: {}", source))]
InfluxdbLineProtocol { InfluxdbLineProtocol {
#[snafu(backtrace)] location: Location,
source: influxdb_line_protocol::Error, source: influxdb_line_protocol::Error,
}, },
#[snafu(display("Failed to write InfluxDB line protocol, source: {}", source))] #[snafu(display("Failed to write InfluxDB line protocol, source: {}", source))]
InfluxdbLinesWrite { InfluxdbLinesWrite {
#[snafu(backtrace)] location: Location,
source: common_grpc::error::Error, source: common_grpc::error::Error,
}, },
#[snafu(display("Failed to write prometheus series, source: {}", source))] #[snafu(display("Failed to write prometheus series, source: {}", source))]
PromSeriesWrite { PromSeriesWrite {
#[snafu(backtrace)] location: Location,
source: common_grpc::error::Error, source: common_grpc::error::Error,
}, },
@@ -178,7 +178,7 @@ pub enum Error {
#[snafu(display("Failed to get user info, source: {}", source))] #[snafu(display("Failed to get user info, source: {}", source))]
Auth { Auth {
#[snafu(backtrace)] location: Location,
source: auth::Error, source: auth::Error,
}, },
@@ -221,7 +221,7 @@ pub enum Error {
#[cfg(feature = "mem-prof")] #[cfg(feature = "mem-prof")]
#[snafu(display("Failed to dump profile data, source: {}", source))] #[snafu(display("Failed to dump profile data, source: {}", source))]
DumpProfileData { DumpProfileData {
#[snafu(backtrace)] location: Location,
source: common_mem_prof::error::Error, source: common_mem_prof::error::Error,
}, },
@@ -246,7 +246,7 @@ pub enum Error {
#[snafu(display("Failed to parse PromQL: {query:?}, source: {source}"))] #[snafu(display("Failed to parse PromQL: {query:?}, source: {source}"))]
ParsePromQL { ParsePromQL {
query: PromQuery, query: PromQuery,
#[snafu(backtrace)] location: Location,
source: query::error::Error, source: query::error::Error,
}, },
@@ -266,6 +266,19 @@ pub enum Error {
source: tokio::task::JoinError, source: tokio::task::JoinError,
location: Location, location: Location,
}, },
#[cfg(feature = "pprof")]
#[snafu(display("Failed to dump pprof data, source: {}", source))]
DumpPprof {
#[snafu(backtrace)]
source: common_pprof::Error,
},
#[snafu(display("Failed to update jemalloc metrics, source: {source}, location: {location}"))]
UpdateJemallocMetrics {
source: tikv_jemalloc_ctl::Error,
location: Location,
},
} }
pub type Result<T> = std::result::Result<T, Error>; pub type Result<T> = std::result::Result<T, Error>;
@@ -341,6 +354,11 @@ impl ErrorExt for Error {
StatusCode::Unknown StatusCode::Unknown
} }
} }
#[cfg(feature = "pprof")]
DumpPprof { source, .. } => source.status_code(),
UpdateJemallocMetrics { .. } => StatusCode::Internal,
} }
} }
@@ -402,12 +420,6 @@ impl From<std::io::Error> for Error {
} }
} }
impl From<auth::Error> for Error {
fn from(e: auth::Error) -> Self {
Error::Auth { source: e }
}
}
impl IntoResponse for Error { impl IntoResponse for Error {
fn into_response(self) -> Response { fn into_response(self) -> Response {
let (status, error_message) = match self { let (status, error_message) = match self {

View File

@@ -29,8 +29,8 @@ use snafu::{OptionExt, ResultExt};
use tonic::Status; use tonic::Status;
use crate::auth::{Identity, Password, UserProviderRef}; use crate::auth::{Identity, Password, UserProviderRef};
use crate::error::Error::{Auth, UnsupportedAuthScheme}; use crate::error::Error::UnsupportedAuthScheme;
use crate::error::{InvalidQuerySnafu, JoinTaskSnafu, NotFoundAuthHeaderSnafu}; use crate::error::{AuthSnafu, InvalidQuerySnafu, JoinTaskSnafu, NotFoundAuthHeaderSnafu};
use crate::grpc::TonicResult; use crate::grpc::TonicResult;
use crate::metrics::{ use crate::metrics::{
METRIC_AUTH_FAILURE, METRIC_CODE_LABEL, METRIC_SERVER_GRPC_DB_REQUEST_TIMER, METRIC_AUTH_FAILURE, METRIC_CODE_LABEL, METRIC_SERVER_GRPC_DB_REQUEST_TIMER,
@@ -123,7 +123,7 @@ impl GreptimeRequestHandler {
&query_ctx.current_schema(), &query_ctx.current_schema(),
) )
.await .await
.map_err(|e| Auth { source: e }), .context(AuthSnafu),
AuthScheme::Token(_) => Err(UnsupportedAuthScheme { AuthScheme::Token(_) => Err(UnsupportedAuthScheme {
name: "Token AuthScheme".to_string(), name: "Token AuthScheme".to_string(),
}), }),

View File

@@ -15,14 +15,18 @@
//! PrometheusGateway provides a gRPC interface to query Prometheus metrics //! PrometheusGateway provides a gRPC interface to query Prometheus metrics
//! by PromQL. The behavior is similar to the Prometheus HTTP API. //! by PromQL. The behavior is similar to the Prometheus HTTP API.
use std::sync::Arc;
use api::v1::prometheus_gateway_server::PrometheusGateway; use api::v1::prometheus_gateway_server::PrometheusGateway;
use api::v1::promql_request::Promql; use api::v1::promql_request::Promql;
use api::v1::{PromqlRequest, PromqlResponse, ResponseHeader}; use api::v1::{PromqlRequest, PromqlResponse, ResponseHeader};
use async_trait::async_trait; use async_trait::async_trait;
use common_error::prelude::ErrorExt;
use common_telemetry::timer; use common_telemetry::timer;
use common_time::util::current_time_rfc3339; use common_time::util::current_time_rfc3339;
use promql_parser::parser::ValueType; use promql_parser::parser::ValueType;
use query::parser::PromQuery; use query::parser::PromQuery;
use session::context::QueryContext;
use snafu::OptionExt; use snafu::OptionExt;
use tonic::{Request, Response}; use tonic::{Request, Response};
@@ -68,23 +72,9 @@ impl PrometheusGateway for PrometheusGatewayService {
         };
 
         let query_context = create_query_context(inner.header.as_ref());
 
-        let _timer = timer!(
-            crate::metrics::METRIC_SERVER_GRPC_PROM_REQUEST_TIMER,
-            &[(
-                crate::metrics::METRIC_DB_LABEL,
-                query_context.get_db_string()
-            )]
-        );
-        let result = self.handler.do_query(&prom_query, query_context).await;
-        let (metric_name, mut result_type) =
-            retrieve_metric_name_and_result_type(&prom_query.query).unwrap_or_default();
-        // range query only returns matrix
-        if is_range_query {
-            result_type = Some(ValueType::Matrix)
-        };
-        let json_response = PromJsonResponse::from_query_result(result, metric_name, result_type)
-            .await
-            .0;
+        let json_response = self
+            .handle_inner(prom_query, query_context, is_range_query)
+            .await;
         let json_bytes = serde_json::to_string(&json_response).unwrap().into_bytes();
         let response = Response::new(PromqlResponse {
@@ -99,4 +89,34 @@ impl PrometheusGatewayService {
pub fn new(handler: PromHandlerRef) -> Self { pub fn new(handler: PromHandlerRef) -> Self {
Self { handler } Self { handler }
} }
async fn handle_inner(
&self,
query: PromQuery,
ctx: Arc<QueryContext>,
is_range_query: bool,
) -> PromJsonResponse {
let _timer = timer!(
crate::metrics::METRIC_SERVER_GRPC_PROM_REQUEST_TIMER,
&[(crate::metrics::METRIC_DB_LABEL, ctx.get_db_string())]
);
let result = self.handler.do_query(&query, ctx).await;
let (metric_name, mut result_type) =
match retrieve_metric_name_and_result_type(&query.query) {
Ok((metric_name, result_type)) => (metric_name.unwrap_or_default(), result_type),
Err(err) => {
return PromJsonResponse::error(err.status_code().to_string(), err.to_string())
.0
}
};
// range query only returns matrix
if is_range_query {
result_type = ValueType::Matrix;
};
PromJsonResponse::from_query_result(result, metric_name, result_type)
.await
.0
}
} }

View File

@@ -12,18 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+mod admin;
pub mod authorize;
pub mod handler;
pub mod influxdb;
+pub mod mem_prof;
pub mod opentsdb;
+mod pprof;
pub mod prometheus;
pub mod script;
-mod admin;
#[cfg(feature = "dashboard")]
mod dashboard;
-#[cfg(feature = "mem-prof")]
-pub mod mem_prof;
use std::net::SocketAddr;
use std::sync::Arc;
@@ -503,15 +503,6 @@ impl HttpServer {
            );
        }
-        // mem profiler
-        #[cfg(feature = "mem-prof")]
-        {
-            router = router.nest(
-                &format!("/{HTTP_API_VERSION}/prof"),
-                Router::new().route("/mem", routing::get(crate::http::mem_prof::mem_prof)),
-            );
-        }
        if let Some(metrics_handler) = self.metrics_handler {
            router = router.nest("", self.route_metrics(metrics_handler));
        }
@@ -556,6 +547,19 @@ impl HttpServer {
                    HttpAuth::<BoxBody>::new(self.user_provider.clone()),
                )),
            )
+            // Handlers for debug, we don't expect a timeout.
+            .nest(
+                &format!("/{HTTP_API_VERSION}/prof"),
+                Router::new()
+                    .route(
+                        "/cpu",
+                        routing::get(pprof::pprof_handler).post(pprof::pprof_handler),
+                    )
+                    .route(
+                        "/mem",
+                        routing::get(mem_prof::mem_prof_handler).post(mem_prof::mem_prof_handler),
+                    ),
+            )
    }
    fn route_metrics<S>(&self, metrics_handler: MetricsHandler) -> Router<S> {
fn route_metrics<S>(&self, metrics_handler: MetricsHandler) -> Router<S> { fn route_metrics<S>(&self, metrics_handler: MetricsHandler) -> Router<S> {

View File

@@ -23,16 +23,15 @@ use http_body::Body;
use metrics::increment_counter;
use secrecy::SecretString;
use session::context::UserInfo;
-use snafu::{ensure, OptionExt, ResultExt};
+use snafu::{ensure, IntoError, OptionExt, ResultExt};
use tower_http::auth::AsyncAuthorizeRequest;
use super::PUBLIC_APIS;
use crate::auth::Error::IllegalParam;
use crate::auth::{Identity, IllegalParamSnafu, UserProviderRef};
-use crate::error::Error::Auth;
use crate::error::{
-    self, InvalidAuthorizationHeaderSnafu, InvisibleASCIISnafu, NotFoundInfluxAuthSnafu, Result,
-    UnsupportedAuthSchemeSnafu,
+    self, AuthSnafu, InvalidAuthorizationHeaderSnafu, InvisibleASCIISnafu, NotFoundInfluxAuthSnafu,
+    Result, UnsupportedAuthSchemeSnafu,
};
use crate::http::HTTP_API_PREFIX;
@@ -183,12 +182,9 @@ fn get_influxdb_credentials<B: Send + Sync + 'static>(
        (Some(username), Some(password)) => {
            Ok(Some((username.to_string(), password.to_string().into())))
        }
-        _ => Err(Auth {
-            source: IllegalParam {
-                msg: "influxdb auth: username and password must be provided together"
-                    .to_string(),
-            },
-        }),
+        _ => Err(AuthSnafu.into_error(IllegalParam {
+            msg: "influxdb auth: username and password must be provided together".to_string(),
+        })),
    }
}

View File

@@ -19,14 +19,14 @@ use aide::transform::TransformOperation;
use axum::extract::{Json, Query, State};
use axum::{Extension, Form};
use common_error::status_code::StatusCode;
-use common_telemetry::timer;
+use common_telemetry::{error, timer};
use query::parser::PromQuery;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use session::context::UserInfo;
use crate::http::{ApiState, JsonResponse};
-use crate::metrics::PROCESS_COLLECTOR;
+use crate::metrics::{JEMALLOC_COLLECTOR, PROCESS_COLLECTOR};
use crate::metrics_handler::MetricsHandler;
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
@@ -137,7 +137,11 @@ pub async fn metrics(
) -> String {
    // Collect process metrics.
    PROCESS_COLLECTOR.collect();
+    if let Some(c) = JEMALLOC_COLLECTOR.as_ref() {
+        if let Err(e) = c.update() {
+            error!(e; "Failed to update jemalloc metrics");
+        }
+    }
    state.render()
}

View File

@@ -14,13 +14,14 @@
use axum::http::StatusCode;
use axum::response::IntoResponse;
-use snafu::ResultExt;
-use crate::error::DumpProfileDataSnafu;
#[cfg(feature = "mem-prof")]
#[axum_macros::debug_handler]
-pub async fn mem_prof() -> crate::error::Result<impl IntoResponse> {
+pub async fn mem_prof_handler() -> crate::error::Result<impl IntoResponse> {
+    use snafu::ResultExt;
+    use crate::error::DumpProfileDataSnafu;
    Ok((
        StatusCode::OK,
        common_mem_prof::dump_profile()
@@ -28,3 +29,12 @@ pub async fn mem_prof() -> crate::error::Result<impl IntoResponse> {
            .context(DumpProfileDataSnafu)?,
    ))
}
+#[cfg(not(feature = "mem-prof"))]
+#[axum_macros::debug_handler]
+pub async fn mem_prof_handler() -> crate::error::Result<impl IntoResponse> {
+    Ok((
+        StatusCode::NOT_IMPLEMENTED,
+        "The 'mem-prof' feature is disabled",
+    ))
+}

View File

@@ -0,0 +1,98 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(feature = "pprof")]
pub mod handler {
use std::num::NonZeroI32;
use std::time::Duration;
use axum::extract::Query;
use axum::http::StatusCode;
use axum::response::IntoResponse;
use common_pprof::Profiling;
use common_telemetry::logging;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use crate::error::{DumpPprofSnafu, Result};
/// Output format.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub enum Output {
    /// Google's pprof format report in protobuf.
Proto,
/// Simple text format.
Text,
    /// SVG flamegraph.
Flamegraph,
}
#[derive(Serialize, Deserialize, Debug, JsonSchema)]
#[serde(default)]
pub struct PprofQuery {
seconds: u64,
frequency: NonZeroI32,
output: Output,
}
impl Default for PprofQuery {
fn default() -> PprofQuery {
PprofQuery {
seconds: 5,
// Safety: 99 is non zero.
frequency: NonZeroI32::new(99).unwrap(),
output: Output::Proto,
}
}
}
#[axum_macros::debug_handler]
pub async fn pprof_handler(Query(req): Query<PprofQuery>) -> Result<impl IntoResponse> {
logging::info!("start pprof, request: {:?}", req);
let profiling = Profiling::new(Duration::from_secs(req.seconds), req.frequency.into());
let body = match req.output {
Output::Proto => profiling.dump_proto().await.context(DumpPprofSnafu)?,
Output::Text => {
let report = profiling.report().await.context(DumpPprofSnafu)?;
format!("{:?}", report).into_bytes()
}
Output::Flamegraph => profiling.dump_flamegraph().await.context(DumpPprofSnafu)?,
};
logging::info!("finish pprof");
Ok((StatusCode::OK, body))
}
}
#[cfg(not(feature = "pprof"))]
pub mod handler {
use axum::http::StatusCode;
use axum::response::IntoResponse;
use crate::error::Result;
#[axum_macros::debug_handler]
pub async fn pprof_handler() -> Result<impl IntoResponse> {
Ok((
StatusCode::NOT_IMPLEMENTED,
"The 'pprof' feature is disabled",
))
}
}
pub use handler::pprof_handler;
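
With the route mounted under the debug prefix, the CPU profiler becomes reachable over plain HTTP. Below is a hypothetical client-side sketch, not part of this change set: the host, port, and `v1` prefix are assumptions standing in for whatever `HTTP_API_VERSION` and the server bind address resolve to, and it relies on the `reqwest` and `tokio` crates. The query parameters mirror the `PprofQuery` fields shown above.

```rust
// Hypothetical client sketch: fetch a 10-second CPU profile as an SVG flamegraph
// from the new debug route and write it to disk. Address and `/v1` prefix are assumed.
use std::{error::Error, fs};

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    let url = "http://127.0.0.1:4000/v1/prof/cpu?seconds=10&frequency=99&output=flamegraph";
    let body = reqwest::get(url).await?.bytes().await?;
    fs::write("cpu.svg", &body)?; // the flamegraph output is an SVG document
    println!("saved {} bytes", body.len());
    Ok(())
}
```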

View File

@@ -15,12 +15,20 @@
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use std::time::Instant; use std::time::Instant;
use common_telemetry::error;
use hyper::Body; use hyper::Body;
use metrics::gauge;
use metrics_process::Collector; use metrics_process::Collector;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use snafu::ResultExt;
use tikv_jemalloc_ctl::stats::{allocated_mib, resident_mib};
use tikv_jemalloc_ctl::{epoch, epoch_mib, stats};
use tonic::body::BoxBody; use tonic::body::BoxBody;
use tower::{Layer, Service}; use tower::{Layer, Service};
use crate::error;
use crate::error::UpdateJemallocMetricsSnafu;
pub(crate) const METRIC_DB_LABEL: &str = "db"; pub(crate) const METRIC_DB_LABEL: &str = "db";
pub(crate) const METRIC_CODE_LABEL: &str = "code"; pub(crate) const METRIC_CODE_LABEL: &str = "code";
pub(crate) const METRIC_TYPE_LABEL: &str = "type"; pub(crate) const METRIC_TYPE_LABEL: &str = "type";
@@ -59,6 +67,8 @@ pub(crate) const METRIC_GRPC_REQUESTS_ELAPSED: &str = "servers.grpc_requests_ela
pub(crate) const METRIC_METHOD_LABEL: &str = "method"; pub(crate) const METRIC_METHOD_LABEL: &str = "method";
pub(crate) const METRIC_PATH_LABEL: &str = "path"; pub(crate) const METRIC_PATH_LABEL: &str = "path";
pub(crate) const METRIC_STATUS_LABEL: &str = "status"; pub(crate) const METRIC_STATUS_LABEL: &str = "status";
pub(crate) const METRIC_JEMALLOC_RESIDENT: &str = "sys.jemalloc.resident";
pub(crate) const METRIC_JEMALLOC_ALLOCATED: &str = "sys.jemalloc.allocated";
/// Prometheus style process metrics collector. /// Prometheus style process metrics collector.
pub(crate) static PROCESS_COLLECTOR: Lazy<Collector> = Lazy::new(|| { pub(crate) static PROCESS_COLLECTOR: Lazy<Collector> = Lazy::new(|| {
@@ -68,6 +78,49 @@ pub(crate) static PROCESS_COLLECTOR: Lazy<Collector> = Lazy::new(|| {
collector collector
}); });
pub(crate) static JEMALLOC_COLLECTOR: Lazy<Option<JemallocCollector>> = Lazy::new(|| {
let collector = JemallocCollector::try_new()
.map_err(|e| {
error!(e; "Failed to retrieve jemalloc metrics");
e
})
.ok();
collector.map(|c| {
if let Err(e) = c.update() {
error!(e; "Failed to update jemalloc metrics");
};
c
})
});
pub(crate) struct JemallocCollector {
epoch: epoch_mib,
allocated: allocated_mib,
resident: resident_mib,
}
impl JemallocCollector {
pub(crate) fn try_new() -> error::Result<Self> {
let e = epoch::mib().context(UpdateJemallocMetricsSnafu)?;
let allocated = stats::allocated::mib().context(UpdateJemallocMetricsSnafu)?;
let resident = stats::resident::mib().context(UpdateJemallocMetricsSnafu)?;
Ok(Self {
epoch: e,
allocated,
resident,
})
}
pub(crate) fn update(&self) -> error::Result<()> {
self.epoch.advance().context(UpdateJemallocMetricsSnafu)?;
let allocated = self.allocated.read().context(UpdateJemallocMetricsSnafu)?;
let resident = self.resident.read().context(UpdateJemallocMetricsSnafu)?;
gauge!(METRIC_JEMALLOC_ALLOCATED, allocated as f64);
gauge!(METRIC_JEMALLOC_RESIDENT, resident as f64);
Ok(())
}
}
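
The collector above hinges on jemalloc's epoch mechanism: statistics are cached and only refreshed when the epoch advances. A stripped-down sketch of that advance-then-read pattern, independent of the metrics plumbing here; it assumes the binary links `tikv-jemallocator` as the global allocator, otherwise the numbers are not meaningful.

```rust
// Minimal illustration of the pattern JemallocCollector relies on.
// Assumes `tikv-jemallocator` and `tikv-jemalloc-ctl` are dependencies of the crate.
use tikv_jemalloc_ctl::{epoch, stats};

#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

fn main() -> Result<(), tikv_jemalloc_ctl::Error> {
    let _data = vec![0u8; 1 << 20]; // allocate something so the numbers move

    // jemalloc caches its statistics; advancing the epoch refreshes the snapshot.
    epoch::advance()?;
    let allocated = stats::allocated::read()?;
    let resident = stats::resident::read()?;
    println!("allocated: {allocated} B, resident: {resident} B");
    Ok(())
}
```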
// Based on https://github.com/hyperium/tonic/blob/master/examples/src/tower/server.rs // Based on https://github.com/hyperium/tonic/blob/master/examples/src/tower/server.rs
// See https://github.com/hyperium/tonic/issues/242 // See https://github.com/hyperium/tonic/issues/242
/// A metrics middleware. /// A metrics middleware.

View File

@@ -16,6 +16,7 @@
//! Inspired by Databend's "[mysql_federated.rs](https://github.com/datafuselabs/databend/blob/ac706bf65845e6895141c96c0a10bad6fdc2d367/src/query/service/src/servers/mysql/mysql_federated.rs)". //! Inspired by Databend's "[mysql_federated.rs](https://github.com/datafuselabs/databend/blob/ac706bf65845e6895141c96c0a10bad6fdc2d367/src/query/service/src/servers/mysql/mysql_federated.rs)".
use std::collections::HashMap; use std::collections::HashMap;
use std::env;
use std::sync::Arc; use std::sync::Arc;
use common_query::Output; use common_query::Output;
@@ -30,9 +31,6 @@ use regex::bytes::RegexSet;
use regex::Regex; use regex::Regex;
use session::context::QueryContextRef; use session::context::QueryContextRef;
// TODO(LFC): Include GreptimeDB's version and git commit tag etc.
const MYSQL_VERSION: &str = "8.0.26";
static SELECT_VAR_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new("(?i)^(SELECT @@(.*))").unwrap()); static SELECT_VAR_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new("(?i)^(SELECT @@(.*))").unwrap());
static MYSQL_CONN_JAVA_PATTERN: Lazy<Regex> = static MYSQL_CONN_JAVA_PATTERN: Lazy<Regex> =
Lazy::new(|| Regex::new("(?i)^(/\\* mysql-connector-j(.*))").unwrap()); Lazy::new(|| Regex::new("(?i)^(/\\* mysql-connector-j(.*))").unwrap());
@@ -285,7 +283,7 @@ fn check_others(query: &str, query_ctx: QueryContextRef) -> Option<Output> {
    }
    let recordbatches = if SELECT_VERSION_PATTERN.is_match(query) {
-        Some(select_function("version()", MYSQL_VERSION))
+        Some(select_function("version()", &get_version()))
    } else if SELECT_DATABASE_PATTERN.is_match(query) {
        let schema = query_ctx.current_schema();
        Some(select_function("database()", &schema))
@@ -318,8 +316,16 @@ pub(crate) fn check(query: &str, query_ctx: QueryContextRef) -> Option<Output> {
        .or_else(|| check_others(query, query_ctx))
}
+// get GreptimeDB's version.
+fn get_version() -> String {
+    format!(
+        "{}-greptime",
+        env::var("CARGO_PKG_VERSION").unwrap_or_else(|_| "unknown".to_string()),
+    )
+}
#[cfg(test)]
mod test {
    use session::context::QueryContext;
    use super::*;
@@ -345,13 +351,15 @@
    }
        let query = "select version()";
-        let expected = "\
-+-----------+
-| version() |
-+-----------+
-| 8.0.26    |
-+-----------+";
-        test(query, expected);
+        let expected = format!(
+            r#"+----------------+
+| version()      |
++----------------+
+| {}-greptime    |
++----------------+"#,
+            env::var("CARGO_PKG_VERSION").unwrap_or_else(|_| "unknown".to_string())
+        );
+        test(query, &expected);
        let query = "SELECT @@version_comment LIMIT 1";
        let expected = "\

View File

@@ -88,9 +88,6 @@ impl MysqlInstanceShim {
trace!("Start executing query: '{}'", query); trace!("Start executing query: '{}'", query);
let start = Instant::now(); let start = Instant::now();
// TODO(LFC): Find a better way to deal with these special federated queries:
// `check` uses regex to filter out unsupported statements emitted by MySQL's federated
// components, this is quick and dirty, there must be a better way to do it.
let output = let output =
if let Some(output) = crate::mysql::federated::check(query, self.session.context()) { if let Some(output) = crate::mysql::federated::check(query, self.session.context()) {
vec![Ok(output)] vec![Ok(output)]

View File

@@ -157,7 +157,6 @@ impl MysqlServer {
info!("MySQL connection coming from: {}", stream.peer_addr()?); info!("MySQL connection coming from: {}", stream.peer_addr()?);
io_runtime.spawn(async move { io_runtime.spawn(async move {
increment_gauge!(crate::metrics::METRIC_MYSQL_CONNECTIONS, 1.0); increment_gauge!(crate::metrics::METRIC_MYSQL_CONNECTIONS, 1.0);
// TODO(LFC): Use `output_stream` to write large MySQL ResultSet to client.
if let Err(e) = Self::do_handle(stream, spawn_ref, spawn_config).await { if let Err(e) = Self::do_handle(stream, spawn_ref, spawn_config).await {
// TODO(LFC): Write this error to client as well, in MySQL text protocol. // TODO(LFC): Write this error to client as well, in MySQL text protocol.
// Looks like we have to expose opensrv-mysql's `PacketWriter`? // Looks like we have to expose opensrv-mysql's `PacketWriter`?

View File

@@ -26,10 +26,11 @@ use pgwire::messages::startup::Authentication;
use pgwire::messages::{PgWireBackendMessage, PgWireFrontendMessage}; use pgwire::messages::{PgWireBackendMessage, PgWireFrontendMessage};
use session::context::UserInfo; use session::context::UserInfo;
use session::Session; use session::Session;
use snafu::IntoError;
use super::PostgresServerHandler; use super::PostgresServerHandler;
use crate::auth::{Identity, Password, UserProviderRef}; use crate::auth::{Identity, Password, UserProviderRef};
use crate::error::Result; use crate::error::{AuthSnafu, Result};
use crate::query_handler::sql::ServerSqlQueryHandlerRef; use crate::query_handler::sql::ServerSqlQueryHandlerRef;
pub(crate) struct PgLoginVerifier { pub(crate) struct PgLoginVerifier {
@@ -106,7 +107,7 @@ impl PgLoginVerifier {
format!("{}", e.status_code()) format!("{}", e.status_code())
)] )]
); );
Err(e.into()) Err(AuthSnafu.into_error(e))
} else { } else {
Ok(true) Ok(true)
} }

View File

@@ -42,7 +42,7 @@ use schemars::JsonSchema;
use serde::de::{self, MapAccess, Visitor}; use serde::de::{self, MapAccess, Visitor};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use session::context::{QueryContext, QueryContextRef}; use session::context::{QueryContext, QueryContextRef};
use snafu::{ensure, OptionExt, ResultExt}; use snafu::{ensure, Location, OptionExt, ResultExt};
use tokio::sync::oneshot::Sender; use tokio::sync::oneshot::Sender;
use tokio::sync::{oneshot, Mutex}; use tokio::sync::{oneshot, Mutex};
use tower::ServiceBuilder; use tower::ServiceBuilder;
@@ -52,7 +52,7 @@ use tower_http::trace::TraceLayer;
use crate::auth::UserProviderRef; use crate::auth::UserProviderRef;
use crate::error::{ use crate::error::{
AlreadyStartedSnafu, CollectRecordbatchSnafu, Error, InternalSnafu, NotSupportedSnafu, Result, AlreadyStartedSnafu, CollectRecordbatchSnafu, Error, InternalSnafu, InvalidQuerySnafu, Result,
StartHttpSnafu, UnexpectedResultSnafu, StartHttpSnafu, UnexpectedResultSnafu,
}; };
use crate::http::authorize::HttpAuth; use crate::http::authorize::HttpAuth;
@@ -97,6 +97,7 @@ impl PromServer {
.route("/query", routing::post(instant_query).get(instant_query)) .route("/query", routing::post(instant_query).get(instant_query))
.route("/query_range", routing::post(range_query).get(range_query)) .route("/query_range", routing::post(range_query).get(range_query))
.route("/labels", routing::post(labels_query).get(labels_query)) .route("/labels", routing::post(labels_query).get(labels_query))
.route("/series", routing::post(series_query).get(series_query))
.route( .route(
"/label/:label_name/values", "/label/:label_name/values",
routing::get(label_values_query), routing::get(label_values_query),
@@ -191,6 +192,7 @@ pub struct PromData {
pub enum PromResponse { pub enum PromResponse {
PromData(PromData), PromData(PromData),
Labels(Vec<String>), Labels(Vec<String>),
Series(Vec<HashMap<String, String>>),
LabelValues(Vec<String>), LabelValues(Vec<String>),
} }
@@ -242,7 +244,7 @@ impl PromJsonResponse {
    pub async fn from_query_result(
        result: Result<Output>,
        metric_name: String,
-        result_type: Option<ValueType>,
+        result_type: ValueType,
    ) -> Json<Self> {
        let response: Result<Json<Self>> = try {
            let json = match result? {
@@ -269,7 +271,7 @@ impl PromJsonResponse {
            json
        };
-        let result_type_string = result_type.map(|t| t.to_string()).unwrap_or_default();
+        let result_type_string = result_type.to_string();
        match response {
            Ok(resp) => resp,
@@ -293,7 +295,7 @@ impl PromJsonResponse {
    fn record_batches_to_data(
        batches: RecordBatches,
        metric_name: String,
-        result_type: Option<ValueType>,
+        result_type: ValueType,
    ) -> Result<PromResponse> {
        // infer semantic type of each column from schema.
        // TODO(ruihang): wish there is a better way to do this.
@@ -388,27 +390,21 @@ impl PromJsonResponse {
            .map(|(tags, mut values)| {
                let metric = tags.into_iter().collect();
                match result_type {
-                    Some(ValueType::Vector) | Some(ValueType::Scalar) | Some(ValueType::String) => {
-                        Ok(PromSeries {
-                            metric,
-                            value: values.pop(),
-                            ..Default::default()
-                        })
-                    }
-                    Some(ValueType::Matrix) => Ok(PromSeries {
+                    ValueType::Vector | ValueType::Scalar | ValueType::String => Ok(PromSeries {
+                        metric,
+                        value: values.pop(),
+                        ..Default::default()
+                    }),
+                    ValueType::Matrix => Ok(PromSeries {
                        metric,
                        values,
                        ..Default::default()
                    }),
-                    other => NotSupportedSnafu {
-                        feat: format!("PromQL result type {other:?}"),
-                    }
-                    .fail(),
                }
            })
            .collect::<Result<Vec<_>>>()?;
-        let result_type_string = result_type.map(|t| t.to_string()).unwrap_or_default();
+        let result_type_string = result_type.to_string();
        let data = PromResponse::PromData(PromData {
            result_type: result_type_string,
            result,
@@ -450,8 +446,10 @@ pub async fn instant_query(
    let query_ctx = QueryContext::with(catalog, schema);
    let result = handler.do_query(&prom_query, Arc::new(query_ctx)).await;
-    let (metric_name, result_type) =
-        retrieve_metric_name_and_result_type(&prom_query.query).unwrap_or_default();
+    let (metric_name, result_type) = match retrieve_metric_name_and_result_type(&prom_query.query) {
+        Ok((metric_name, result_type)) => (metric_name.unwrap_or_default(), result_type),
+        Err(err) => return PromJsonResponse::error(err.status_code().to_string(), err.to_string()),
+    };
    PromJsonResponse::from_query_result(result, metric_name, result_type).await
}
@@ -484,9 +482,11 @@ pub async fn range_query(
    let query_ctx = QueryContext::with(catalog, schema);
    let result = handler.do_query(&prom_query, Arc::new(query_ctx)).await;
-    let (metric_name, _) =
-        retrieve_metric_name_and_result_type(&prom_query.query).unwrap_or_default();
-    PromJsonResponse::from_query_result(result, metric_name, Some(ValueType::Matrix)).await
+    let metric_name = match retrieve_metric_name_and_result_type(&prom_query.query) {
+        Err(err) => return PromJsonResponse::error(err.status_code().to_string(), err.to_string()),
+        Ok((metric_name, _)) => metric_name.unwrap_or_default(),
+    };
+    PromJsonResponse::from_query_result(result, metric_name, ValueType::Matrix).await
}
#[derive(Debug, Default, Serialize, JsonSchema)] #[derive(Debug, Default, Serialize, JsonSchema)]
@@ -593,6 +593,30 @@ pub async fn labels_query(
PromJsonResponse::success(PromResponse::Labels(sorted_labels)) PromJsonResponse::success(PromResponse::Labels(sorted_labels))
} }
async fn retrieve_series_from_query_result(
result: Result<Output>,
series: &mut Vec<HashMap<String, String>>,
table_name: &str,
) -> Result<()> {
match result? {
Output::RecordBatches(batches) => {
record_batches_to_series(batches, series, table_name)?;
Ok(())
}
Output::Stream(stream) => {
let batches = RecordBatches::try_collect(stream)
.await
.context(CollectRecordbatchSnafu)?;
record_batches_to_series(batches, series, table_name)?;
Ok(())
}
Output::AffectedRows(_) => Err(Error::UnexpectedResult {
reason: "expected data result, but got affected rows".to_string(),
location: Location::default(),
}),
}
}
/// Retrieve labels name from query result /// Retrieve labels name from query result
async fn retrieve_labels_name_from_query_result( async fn retrieve_labels_name_from_query_result(
result: Result<Output>, result: Result<Output>,
@@ -617,6 +641,28 @@ async fn retrieve_labels_name_from_query_result(
} }
} }
fn record_batches_to_series(
batches: RecordBatches,
series: &mut Vec<HashMap<String, String>>,
table_name: &str,
) -> Result<()> {
for batch in batches.iter() {
for row in batch.rows() {
let mut element: HashMap<String, String> = row
.iter()
.enumerate()
.map(|(idx, column)| {
let column_name = batch.schema.column_name_by_index(idx);
(column_name.to_string(), column.to_string())
})
.collect();
element.insert("__name__".to_string(), table_name.to_string());
series.push(element);
}
}
Ok(())
}
/// Retrieve labels name from record batches /// Retrieve labels name from record batches
fn record_batches_to_labels_name( fn record_batches_to_labels_name(
batches: RecordBatches, batches: RecordBatches,
@@ -675,12 +721,13 @@ fn record_batches_to_labels_name(
pub(crate) fn retrieve_metric_name_and_result_type(
    promql: &str,
-) -> Option<(String, Option<ValueType>)> {
-    let promql_expr = promql_parser::parser::parse(promql).ok()?;
-    let metric_name = promql_expr_to_metric_name(&promql_expr)?;
-    let result_type = Some(promql_expr.value_type());
-    Some((metric_name, result_type))
+) -> Result<(Option<String>, ValueType)> {
+    let promql_expr = promql_parser::parser::parse(promql)
+        .map_err(|reason| InvalidQuerySnafu { reason }.build())?;
+    let metric_name = promql_expr_to_metric_name(&promql_expr);
+    let result_type = promql_expr.value_type();
+    Ok((metric_name, result_type))
}
fn promql_expr_to_metric_name(expr: &PromqlExpr) -> Option<String> {
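
The reworked helper above now surfaces PromQL parse failures instead of collapsing them into `None`. A small standalone sketch of the underlying `promql_parser` calls it builds on; the query strings are arbitrary examples, not taken from the patch.

```rust
// Standalone sketch of the promql_parser calls the helper relies on.
use promql_parser::parser::{self, ValueType};

fn main() {
    // A selector expression: its value type is an instant vector.
    let expr = parser::parse("http_requests_total{job=\"api\"}").expect("valid PromQL");
    assert!(matches!(expr.value_type(), ValueType::Vector));

    // A malformed query now becomes an InvalidQuery error in the handler
    // instead of silently turning into an empty result.
    assert!(parser::parse("rate(").is_err());
}
```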
@@ -803,14 +850,12 @@ async fn retrieve_label_values_from_record_batch(
ConcreteDataType::String(_) => {} ConcreteDataType::String(_) => {}
_ => return Ok(()), _ => return Ok(()),
} }
for batch in batches.iter() { for batch in batches.iter() {
let label_column = batch let label_column = batch
.column(label_col_idx) .column(label_col_idx)
.as_any() .as_any()
.downcast_ref::<StringVector>() .downcast_ref::<StringVector>()
.unwrap(); .unwrap();
for row_index in 0..batch.num_rows() { for row_index in 0..batch.num_rows() {
if let Some(label_value) = label_column.get_data(row_index) { if let Some(label_value) = label_column.get_data(row_index) {
labels_values.insert(label_value.to_string()); labels_values.insert(label_value.to_string());
@@ -820,3 +865,57 @@ async fn retrieve_label_values_from_record_batch(
Ok(()) Ok(())
} }
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
pub struct SeriesQuery {
start: Option<String>,
end: Option<String>,
#[serde(flatten)]
matches: Matches,
db: Option<String>,
}
#[axum_macros::debug_handler]
pub async fn series_query(
State(handler): State<PromHandlerRef>,
Query(params): Query<SeriesQuery>,
Form(form_params): Form<SeriesQuery>,
) -> Json<PromJsonResponse> {
let mut queries: Vec<String> = params.matches.0;
if queries.is_empty() {
queries = form_params.matches.0;
}
if queries.is_empty() {
return PromJsonResponse::error("Unsupported", "match[] parameter is required");
}
let start = params
.start
.or(form_params.start)
.unwrap_or_else(yesterday_rfc3339);
let end = params
.end
.or(form_params.end)
.unwrap_or_else(current_time_rfc3339);
let db = &params.db.unwrap_or(DEFAULT_SCHEMA_NAME.to_string());
let (catalog, schema) = super::parse_catalog_and_schema_from_client_database_name(db);
let query_ctx = Arc::new(QueryContext::with(catalog, schema));
let mut series = Vec::new();
for query in queries {
let table_name = query.clone();
let prom_query = PromQuery {
query,
start: start.clone(),
end: end.clone(),
// TODO: find a better value for step
step: DEFAULT_LOOKBACK_STRING.to_string(),
};
let result = handler.do_query(&prom_query, query_ctx.clone()).await;
if let Err(err) = retrieve_series_from_query_result(result, &mut series, &table_name).await
{
return PromJsonResponse::error(err.status_code().to_string(), err.to_string());
}
}
PromJsonResponse::success(PromResponse::Series(series))
}
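
For context, the new handler speaks the same shape as Prometheus' series API. Below is a hypothetical client call; only the `match[]`, `start`, and `end` parameter names come from the handler above, while the route prefix, host, and port are assumptions.

```rust
// Hypothetical client sketch for the new series handler, posting form data.
use std::error::Error;

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    let resp = reqwest::Client::new()
        .post("http://127.0.0.1:4000/v1/prometheus/api/v1/series") // assumed path
        .form(&[
            ("match[]", "demo_metric"),
            ("start", "2023-06-01T00:00:00Z"),
            ("end", "2023-06-02T00:00:00Z"),
        ])
        .send()
        .await?
        .text()
        .await?;
    println!("{resp}");
    Ok(())
}
```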

View File

@@ -43,7 +43,6 @@ pub trait SqlQueryHandler {
query_ctx: QueryContextRef, query_ctx: QueryContextRef,
) -> Vec<std::result::Result<Output, Self::Error>>; ) -> Vec<std::result::Result<Output, Self::Error>>;
// TODO(LFC): revisit this for mysql prepared statement
async fn do_describe( async fn do_describe(
&self, &self,
stmt: Statement, stmt: Statement,

View File

@@ -98,13 +98,13 @@ pub enum Error {
#[snafu(display("Invalid default constraint, column: {}, source: {}", column, source))] #[snafu(display("Invalid default constraint, column: {}, source: {}", column, source))]
InvalidDefault { InvalidDefault {
column: String, column: String,
#[snafu(backtrace)] location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
#[snafu(display("Failed to serialize column default constraint, source: {}", source))] #[snafu(display("Failed to serialize column default constraint, source: {}", source))]
SerializeColumnDefaultConstraint { SerializeColumnDefaultConstraint {
#[snafu(backtrace)] location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
@@ -113,7 +113,7 @@ pub enum Error {
source source
))] ))]
ConvertToGrpcDataType { ConvertToGrpcDataType {
#[snafu(backtrace)] location: Location,
source: api::error::Error, source: api::error::Error,
}, },

View File

@@ -41,7 +41,7 @@ impl BenchContext {
batch_size, batch_size,
..Default::default() ..Default::default()
}; };
let iter = self.memtable.iter(&iter_ctx).unwrap(); let iter = self.memtable.iter(iter_ctx).unwrap();
for batch in iter { for batch in iter {
batch.unwrap(); batch.unwrap();
read_count += batch_size; read_count += batch_size;

View File

@@ -62,7 +62,6 @@ impl RegionDescBuilder {
row_key: self.key_builder.build().unwrap(), row_key: self.key_builder.build().unwrap(),
default_cf: self.default_cf_builder.build().unwrap(), default_cf: self.default_cf_builder.build().unwrap(),
extra_cfs: Vec::new(), extra_cfs: Vec::new(),
compaction_time_window: None,
} }
} }

View File

@@ -220,7 +220,9 @@ impl ChunkReaderBuilder {
            .batch_size(self.iter_ctx.batch_size);
        for mem in &self.memtables {
-            let iter = mem.iter(&self.iter_ctx)?;
+            let mut iter_ctx = self.iter_ctx.clone();
+            iter_ctx.time_range = Some(*time_range);
+            let iter = mem.iter(iter_ctx)?;
            reader_builder = reader_builder.push_batch_iter(iter);
        }

View File

@@ -120,6 +120,7 @@ impl<S: LogStore> Picker for SimplePicker<S> {
} }
let ctx = &PickerContext::with(req.compaction_time_window); let ctx = &PickerContext::with(req.compaction_time_window);
for level_num in 0..levels.level_num() { for level_num in 0..levels.level_num() {
let level = levels.level(level_num as u8); let level = levels.level(level_num as u8);
let (compaction_time_window, outputs) = self.strategy.pick(ctx, level); let (compaction_time_window, outputs) = self.strategy.pick(ctx, level);
@@ -130,8 +131,8 @@ impl<S: LogStore> Picker for SimplePicker<S> {
} }
        debug!(
-            "Found SST files to compact {:?} on level: {}",
-            outputs, level_num
+            "Found SST files to compact {:?} on level: {}, compaction window: {:?}",
+            outputs, level_num, compaction_time_window,
        );
return Ok(Some(CompactionTaskImpl { return Ok(Some(CompactionTaskImpl {
schema: req.schema(), schema: req.schema(),

View File

@@ -47,19 +47,24 @@ impl Strategy for SimpleTimeWindowStrategy {
        if files.is_empty() {
            return (None, vec![]);
        }
-        let time_bucket = ctx
-            .compaction_time_window()
-            .unwrap_or_else(|| infer_time_bucket(&files));
-        let buckets = calculate_time_buckets(time_bucket, &files);
-        debug!("File bucket:{}, file groups: {:?}", time_bucket, buckets);
+        let time_window = ctx.compaction_time_window().unwrap_or_else(|| {
+            let inferred = infer_time_bucket(&files);
+            debug!(
+                "Compaction window is not present, inferring from files: {:?}",
+                inferred
+            );
+            inferred
+        });
+        let buckets = calculate_time_buckets(time_window, &files);
+        debug!("File bucket:{}, file groups: {:?}", time_window, buckets);
        (
-            Some(time_bucket),
+            Some(time_window),
            buckets
                .into_iter()
                .map(|(bound, files)| CompactionOutput {
                    output_level: 1,
                    bucket_bound: bound,
-                    bucket: time_bucket,
+                    bucket: time_window,
                    inputs: files,
                })
                .collect(),
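
The strategy above groups SST files into fixed time windows before deciding what to compact together. A toy illustration of that bucketing idea follows; it is not the storage crate's actual `calculate_time_buckets`, just an assumed simplification on plain integer timestamps.

```rust
// Toy sketch: align each file's end timestamp to the start of its time window and
// group files per bucket. Window size and timestamps are arbitrary example values.
use std::collections::BTreeMap;

fn align_to_window(ts_secs: i64, window_secs: i64) -> i64 {
    // Round down to the bucket start; div_euclid keeps negative timestamps correct.
    ts_secs.div_euclid(window_secs) * window_secs
}

fn main() {
    let window = 3_600; // pretend the persisted or inferred window is one hour
    let file_end_times = [10, 3_599, 3_600, 7_300];

    let mut buckets: BTreeMap<i64, Vec<i64>> = BTreeMap::new();
    for ts in file_end_times {
        buckets.entry(align_to_window(ts, window)).or_default().push(ts);
    }
    // 10 and 3599 land in bucket 0; 3600 starts bucket 3600; 7300 falls into bucket 7200.
    println!("{buckets:?}");
}
```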

View File

@@ -102,7 +102,6 @@ impl<S: LogStore> CompactionTaskImpl<S> {
} }
/// Writes updated SST info into manifest. /// Writes updated SST info into manifest.
// TODO(etolbakov): we are not persisting inferred compaction_time_window (#1083)[https://github.com/GreptimeTeam/greptimedb/pull/1083]
async fn write_manifest_and_apply( async fn write_manifest_and_apply(
&self, &self,
output: HashSet<FileMeta>, output: HashSet<FileMeta>,
@@ -116,6 +115,7 @@ impl<S: LogStore> CompactionTaskImpl<S> {
flushed_sequence: None, flushed_sequence: None,
files_to_add: Vec::from_iter(output.into_iter()), files_to_add: Vec::from_iter(output.into_iter()),
files_to_remove: Vec::from_iter(input.into_iter()), files_to_remove: Vec::from_iter(input.into_iter()),
compaction_time_window: self.compaction_time_window,
}; };
debug!( debug!(
"Compacted region: {}, region edit: {:?}", "Compacted region: {}, region edit: {:?}",
@@ -151,7 +151,10 @@ impl<S: LogStore> CompactionTask for CompactionTaskImpl<S> {
let input_ids = compacted.iter().map(|f| f.file_id).collect::<Vec<_>>(); let input_ids = compacted.iter().map(|f| f.file_id).collect::<Vec<_>>();
let output_ids = output.iter().map(|f| f.file_id).collect::<Vec<_>>(); let output_ids = output.iter().map(|f| f.file_id).collect::<Vec<_>>();
info!("Compacting SST files, input: {input_ids:?}, output: {output_ids:?}"); info!(
"Compacting SST files, input: {:?}, output: {:?}, window: {:?}",
input_ids, output_ids, self.compaction_time_window
);
self.write_manifest_and_apply(output, compacted) self.write_manifest_and_apply(output, compacted)
.await .await
.map_err(|e| { .map_err(|e| {

View File

@@ -217,7 +217,7 @@ mod tests {
seq.fetch_add(1, Ordering::Relaxed); seq.fetch_add(1, Ordering::Relaxed);
} }
let iter = memtable.iter(&IterContext::default()).unwrap(); let iter = memtable.iter(IterContext::default()).unwrap();
let file_path = sst_file_id.as_parquet(); let file_path = sst_file_id.as_parquet();
let writer = ParquetWriter::new(&file_path, Source::Iter(iter), object_store.clone()); let writer = ParquetWriter::new(&file_path, Source::Iter(iter), object_store.clone());

View File

@@ -28,7 +28,7 @@ use store_api::storage::{
}; };
use crate::compaction::CompactionSchedulerRef; use crate::compaction::CompactionSchedulerRef;
use crate::config::{EngineConfig, DEFAULT_REGION_WRITE_BUFFER_SIZE}; use crate::config::EngineConfig;
use crate::error::{self, Error, Result}; use crate::error::{self, Error, Result};
use crate::file_purger::{FilePurgeHandler, FilePurgerRef}; use crate::file_purger::{FilePurgeHandler, FilePurgerRef};
use crate::flush::{ use crate::flush::{
@@ -89,7 +89,7 @@ impl<S: LogStore> StorageEngine for EngineImpl<S> {
async fn drop_region(&self, _ctx: &EngineContext, region: Self::Region) -> Result<()> { async fn drop_region(&self, _ctx: &EngineContext, region: Self::Region) -> Result<()> {
region.drop_region().await?; region.drop_region().await?;
self.inner.remove_reigon(region.name()); self.inner.remove_region(region.name());
Ok(()) Ok(())
} }
@@ -395,7 +395,6 @@ impl<S: LogStore> EngineInner<S> {
name, name,
&self.config, &self.config,
opts.ttl, opts.ttl,
opts.compaction_time_window,
) )
.await?; .await?;
@@ -441,7 +440,6 @@ impl<S: LogStore> EngineInner<S> {
&region_name, &region_name,
&self.config, &self.config,
opts.ttl, opts.ttl,
opts.compaction_time_window,
) )
.await?; .await?;
@@ -462,7 +460,7 @@ impl<S: LogStore> EngineInner<S> {
self.regions.get_region(name) self.regions.get_region(name)
} }
fn remove_reigon(&self, name: &str) { fn remove_region(&self, name: &str) {
self.regions.remove(name) self.regions.remove(name)
} }
@@ -473,7 +471,6 @@ impl<S: LogStore> EngineInner<S> {
region_name: &str, region_name: &str,
config: &EngineConfig, config: &EngineConfig,
region_ttl: Option<Duration>, region_ttl: Option<Duration>,
compaction_time_window: Option<i64>,
) -> Result<StoreConfig<S>> { ) -> Result<StoreConfig<S>> {
let parent_dir = util::normalize_dir(parent_dir); let parent_dir = util::normalize_dir(parent_dir);
@@ -504,9 +501,8 @@ impl<S: LogStore> EngineInner<S> {
engine_config: self.config.clone(), engine_config: self.config.clone(),
file_purger: self.file_purger.clone(), file_purger: self.file_purger.clone(),
ttl, ttl,
compaction_time_window,
write_buffer_size: write_buffer_size write_buffer_size: write_buffer_size
.unwrap_or(DEFAULT_REGION_WRITE_BUFFER_SIZE.as_bytes() as usize), .unwrap_or(self.config.region_write_buffer_size.as_bytes() as usize),
}) })
} }
@@ -553,7 +549,7 @@ mod tests {
log_file_dir: &TempDir, log_file_dir: &TempDir,
region_name: &str, region_name: &str,
region_id: u64, region_id: u64,
ctx: &EngineContext, config: EngineConfig,
) -> (TestEngine, TestRegion) { ) -> (TestEngine, TestRegion) {
let log_file_dir_path = log_file_dir.path().to_str().unwrap(); let log_file_dir_path = log_file_dir.path().to_str().unwrap();
let log_store = log_store_util::create_tmp_local_file_log_store(log_file_dir_path).await; let log_store = log_store_util::create_tmp_local_file_log_store(log_file_dir_path).await;
@@ -564,8 +560,6 @@ mod tests {
builder.root(&store_dir); builder.root(&store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish(); let object_store = ObjectStore::new(builder).unwrap().finish();
let config = EngineConfig::default();
let compaction_scheduler = Arc::new(NoopCompactionScheduler::default()); let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let engine = EngineImpl::new( let engine = EngineImpl::new(
@@ -584,7 +578,7 @@ mod tests {
.build(); .build();
let region = engine let region = engine
.create_region(ctx, desc, &CreateOptions::default()) .create_region(&EngineContext::default(), desc, &CreateOptions::default())
.await .await
.unwrap(); .unwrap();
@@ -606,18 +600,38 @@ mod tests {
let region_name = "region-0"; let region_name = "region-0";
let region_id = 123456; let region_id = 123456;
let ctx = EngineContext::default(); let config = EngineConfig::default();
let (engine, region) = let (engine, region) =
create_engine_and_region(&dir, &log_file_dir, region_name, region_id, &ctx).await; create_engine_and_region(&dir, &log_file_dir, region_name, region_id, config).await;
assert_eq!(region_name, region.name()); assert_eq!(region_name, region.name());
let ctx = EngineContext::default();
let region2 = engine.get_region(&ctx, region_name).unwrap().unwrap(); let region2 = engine.get_region(&ctx, region_name).unwrap().unwrap();
assert_eq!(region_name, region2.name()); assert_eq!(region_name, region2.name());
assert!(engine.get_region(&ctx, "no such region").unwrap().is_none()); assert!(engine.get_region(&ctx, "no such region").unwrap().is_none());
} }
#[tokio::test]
async fn test_create_region_with_buffer_size() {
let dir = create_temp_dir("test_buffer_size");
let log_file_dir = create_temp_dir("test_buffer_wal");
let region_name = "region-0";
let region_id = 123456;
let mut config = EngineConfig::default();
let expect_buffer_size = config.region_write_buffer_size / 2;
config.region_write_buffer_size = expect_buffer_size;
let (_engine, region) =
create_engine_and_region(&dir, &log_file_dir, region_name, region_id, config).await;
assert_eq!(
expect_buffer_size.as_bytes() as usize,
region.write_buffer_size().await
);
}
#[tokio::test] #[tokio::test]
async fn test_drop_region() { async fn test_drop_region() {
common_telemetry::init_default_ut_logging(); common_telemetry::init_default_ut_logging();
@@ -626,10 +640,10 @@ mod tests {
let region_name = "test_region"; let region_name = "test_region";
let region_id = 123456; let region_id = 123456;
let ctx = EngineContext::default(); let config = EngineConfig::default();
let (engine, region) = let (engine, region) =
create_engine_and_region(&dir, &log_file_dir, region_name, region_id, &ctx).await; create_engine_and_region(&dir, &log_file_dir, region_name, region_id, config).await;
assert_eq!(region_name, region.name()); assert_eq!(region_name, region.name());
@@ -648,6 +662,7 @@ mod tests {
// Flush memtable to sst. // Flush memtable to sst.
region.flush(&FlushContext::default()).await.unwrap(); region.flush(&FlushContext::default()).await.unwrap();
let ctx = EngineContext::default();
engine engine
.close_region(&ctx, region.name(), &CloseOptions::default()) .close_region(&ctx, region.name(), &CloseOptions::default())
.await .await

View File

@@ -38,7 +38,7 @@ pub enum Error {
#[snafu(display("Invalid region descriptor, region: {}, source: {}", region, source))] #[snafu(display("Invalid region descriptor, region: {}, source: {}", region, source))]
InvalidRegionDesc { InvalidRegionDesc {
region: String, region: String,
#[snafu(backtrace)] location: Location,
source: MetadataError, source: MetadataError,
}, },
@@ -53,7 +53,7 @@ pub enum Error {
#[snafu(display("Failed to write to buffer, source: {}", source))] #[snafu(display("Failed to write to buffer, source: {}", source))]
WriteBuffer { WriteBuffer {
#[snafu(backtrace)] location: Location,
source: common_datasource::error::Error, source: common_datasource::error::Error,
}, },
@@ -147,7 +147,7 @@ pub enum Error {
))] ))]
WriteWal { WriteWal {
region_id: RegionId, region_id: RegionId,
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },
@@ -218,7 +218,7 @@ pub enum Error {
#[snafu(display("Failed to read WAL, region_id: {}, source: {}", region_id, source))] #[snafu(display("Failed to read WAL, region_id: {}, source: {}", region_id, source))]
ReadWal { ReadWal {
region_id: RegionId, region_id: RegionId,
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },
@@ -229,7 +229,7 @@ pub enum Error {
))] ))]
MarkWalObsolete { MarkWalObsolete {
region_id: u64, region_id: u64,
#[snafu(backtrace)] location: Location,
source: BoxedError, source: BoxedError,
}, },
@@ -265,14 +265,14 @@ pub enum Error {
#[snafu(display("Failed to convert store schema, file: {}, source: {}", file, source))] #[snafu(display("Failed to convert store schema, file: {}, source: {}", file, source))]
ConvertStoreSchema { ConvertStoreSchema {
file: String, file: String,
#[snafu(backtrace)] location: Location,
source: MetadataError, source: MetadataError,
}, },
#[snafu(display("Invalid raw region metadata, region: {}, source: {}", region, source))] #[snafu(display("Invalid raw region metadata, region: {}, source: {}", region, source))]
InvalidRawRegion { InvalidRawRegion {
region: String, region: String,
#[snafu(backtrace)] location: Location,
source: MetadataError, source: MetadataError,
}, },
@@ -281,13 +281,13 @@ pub enum Error {
#[snafu(display("Invalid projection, source: {}", source))] #[snafu(display("Invalid projection, source: {}", source))]
InvalidProjection { InvalidProjection {
#[snafu(backtrace)] location: Location,
source: MetadataError, source: MetadataError,
}, },
#[snafu(display("Failed to push data to batch builder, source: {}", source))] #[snafu(display("Failed to push data to batch builder, source: {}", source))]
PushBatch { PushBatch {
#[snafu(backtrace)] location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
@@ -297,19 +297,19 @@ pub enum Error {
#[snafu(display("Failed to filter column {}, source: {}", name, source))] #[snafu(display("Failed to filter column {}, source: {}", name, source))]
FilterColumn { FilterColumn {
name: String, name: String,
#[snafu(backtrace)] location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
#[snafu(display("Invalid alter request, source: {}", source))] #[snafu(display("Invalid alter request, source: {}", source))]
InvalidAlterRequest { InvalidAlterRequest {
#[snafu(backtrace)] location: Location,
source: MetadataError, source: MetadataError,
}, },
#[snafu(display("Failed to alter metadata, source: {}", source))] #[snafu(display("Failed to alter metadata, source: {}", source))]
AlterMetadata { AlterMetadata {
#[snafu(backtrace)] location: Location,
source: MetadataError, source: MetadataError,
}, },
@@ -320,7 +320,7 @@ pub enum Error {
))] ))]
CreateDefault { CreateDefault {
name: String, name: String,
#[snafu(backtrace)] location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
@@ -353,7 +353,7 @@ pub enum Error {
))] ))]
CreateDefaultToRead { CreateDefaultToRead {
column: String, column: String,
#[snafu(backtrace)] location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
@@ -367,7 +367,7 @@ pub enum Error {
))] ))]
ConvertChunk { ConvertChunk {
name: String, name: String,
#[snafu(backtrace)] location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
@@ -376,7 +376,7 @@ pub enum Error {
#[snafu(display("Failed to create record batch for write batch, source:{}", source))] #[snafu(display("Failed to create record batch for write batch, source:{}", source))]
CreateRecordBatch { CreateRecordBatch {
#[snafu(backtrace)] location: Location,
source: common_recordbatch::error::Error, source: common_recordbatch::error::Error,
}, },
@@ -451,13 +451,13 @@ pub enum Error {
#[snafu(display("Failed to start manifest gc task: {}", source))] #[snafu(display("Failed to start manifest gc task: {}", source))]
StartManifestGcTask { StartManifestGcTask {
#[snafu(backtrace)] location: Location,
source: RuntimeError, source: RuntimeError,
}, },
#[snafu(display("Failed to stop manifest gc task: {}", source))] #[snafu(display("Failed to stop manifest gc task: {}", source))]
StopManifestGcTask { StopManifestGcTask {
#[snafu(backtrace)] location: Location,
source: RuntimeError, source: RuntimeError,
}, },
@@ -475,7 +475,7 @@ pub enum Error {
#[snafu(display("Failed to calculate SST expire time, source: {}", source))] #[snafu(display("Failed to calculate SST expire time, source: {}", source))]
TtlCalculation { TtlCalculation {
#[snafu(backtrace)] location: Location,
source: common_time::error::Error, source: common_time::error::Error,
}, },
@@ -501,13 +501,13 @@ pub enum Error {
#[snafu(display("Failed to start picking task for flush: {}", source))] #[snafu(display("Failed to start picking task for flush: {}", source))]
StartPickTask { StartPickTask {
#[snafu(backtrace)] location: Location,
source: RuntimeError, source: RuntimeError,
}, },
#[snafu(display("Failed to stop picking task for flush: {}", source))] #[snafu(display("Failed to stop picking task for flush: {}", source))]
StopPickTask { StopPickTask {
#[snafu(backtrace)] location: Location,
source: RuntimeError, source: RuntimeError,
}, },

View File

@@ -143,7 +143,7 @@ mod tests {
&[(Some(1), Some(1)), (Some(2), Some(2))], &[(Some(1), Some(1)), (Some(2), Some(2))],
); );
let iter = memtable.iter(&IterContext::default()).unwrap(); let iter = memtable.iter(IterContext::default()).unwrap();
let sst_path = "table1"; let sst_path = "table1";
let layer = Arc::new(FsAccessLayer::new(sst_path, os.clone())); let layer = Arc::new(FsAccessLayer::new(sst_path, os.clone()));
let sst_info = layer let sst_info = layer

View File

@@ -19,6 +19,7 @@ use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc; use std::sync::Arc;
use common_telemetry::{logging, timer}; use common_telemetry::{logging, timer};
use metrics::counter;
pub use picker::{FlushPicker, PickerConfig}; pub use picker::{FlushPicker, PickerConfig};
pub use scheduler::{ pub use scheduler::{
FlushHandle, FlushRegionRequest, FlushRequest, FlushScheduler, FlushSchedulerRef, FlushHandle, FlushRegionRequest, FlushRequest, FlushScheduler, FlushSchedulerRef,
@@ -32,7 +33,7 @@ use crate::error::Result;
use crate::manifest::action::*; use crate::manifest::action::*;
use crate::manifest::region::RegionManifest; use crate::manifest::region::RegionManifest;
use crate::memtable::{IterContext, MemtableId, MemtableRef}; use crate::memtable::{IterContext, MemtableId, MemtableRef};
use crate::metrics::FLUSH_ELAPSED; use crate::metrics::{FLUSH_BYTES_TOTAL, FLUSH_ELAPSED};
use crate::region::{RegionWriterRef, SharedDataRef}; use crate::region::{RegionWriterRef, SharedDataRef};
use crate::sst::{AccessLayerRef, FileId, FileMeta, Source, SstInfo, WriteOptions}; use crate::sst::{AccessLayerRef, FileId, FileMeta, Source, SstInfo, WriteOptions};
use crate::wal::Wal; use crate::wal::Wal;
@@ -266,7 +267,7 @@ impl<S: LogStore> FlushJob<S> {
let file_id = FileId::random(); let file_id = FileId::random();
// TODO(hl): Check if random file name already exists in meta. // TODO(hl): Check if random file name already exists in meta.
let iter = m.iter(&iter_ctx)?; let iter = m.iter(iter_ctx.clone())?;
let sst_layer = self.sst_layer.clone(); let sst_layer = self.sst_layer.clone();
let write_options = WriteOptions { let write_options = WriteOptions {
sst_write_buffer_size: self.engine_config.sst_write_buffer_size, sst_write_buffer_size: self.engine_config.sst_write_buffer_size,
@@ -297,6 +298,9 @@ impl<S: LogStore> FlushJob<S> {
.flatten() .flatten()
.collect(); .collect();
let flush_bytes = metas.iter().map(|f| f.file_size).sum();
counter!(FLUSH_BYTES_TOTAL, flush_bytes);
let file_ids = metas.iter().map(|f| f.file_id).collect::<Vec<_>>(); let file_ids = metas.iter().map(|f| f.file_id).collect::<Vec<_>>();
logging::info!("Successfully flush memtables, region:{region_id}, files: {file_ids:?}"); logging::info!("Successfully flush memtables, region:{region_id}, files: {file_ids:?}");
Ok(metas) Ok(metas)
@@ -308,6 +312,7 @@ impl<S: LogStore> FlushJob<S> {
flushed_sequence: Some(self.flush_sequence), flushed_sequence: Some(self.flush_sequence),
files_to_add: file_metas.to_vec(), files_to_add: file_metas.to_vec(),
files_to_remove: Vec::default(), files_to_remove: Vec::default(),
compaction_time_window: None,
}; };
self.writer self.writer

View File

@@ -38,8 +38,6 @@ pub struct RawRegionMetadata {
pub columns: RawColumnsMetadata, pub columns: RawColumnsMetadata,
pub column_families: RawColumnFamiliesMetadata, pub column_families: RawColumnFamiliesMetadata,
pub version: VersionNumber, pub version: VersionNumber,
/// Time window for compaction
pub compaction_time_window: Option<i64>,
} }
/// Minimal data that could be used to persist and recover [ColumnsMetadata](crate::metadata::ColumnsMetadata). /// Minimal data that could be used to persist and recover [ColumnsMetadata](crate::metadata::ColumnsMetadata).
@@ -78,6 +76,7 @@ pub struct RegionEdit {
pub flushed_sequence: Option<SequenceNumber>, pub flushed_sequence: Option<SequenceNumber>,
pub files_to_add: Vec<FileMeta>, pub files_to_add: Vec<FileMeta>,
pub files_to_remove: Vec<FileMeta>, pub files_to_remove: Vec<FileMeta>,
pub compaction_time_window: Option<i64>,
} }
/// The region version checkpoint /// The region version checkpoint
@@ -382,6 +381,7 @@ mod tests {
flushed_sequence: Some(99), flushed_sequence: Some(99),
files_to_add: files.clone(), files_to_add: files.clone(),
files_to_remove: vec![], files_to_remove: vec![],
compaction_time_window: None,
}, },
); );
builder.apply_edit( builder.apply_edit(
@@ -391,6 +391,7 @@ mod tests {
flushed_sequence: Some(100), flushed_sequence: Some(100),
files_to_add: vec![], files_to_add: vec![],
files_to_remove: vec![files[0].clone()], files_to_remove: vec![files[0].clone()],
compaction_time_window: None,
}, },
); );

View File

@@ -36,7 +36,7 @@ use crate::manifest::checkpoint::Checkpointer;
use crate::manifest::storage::{ManifestObjectStore, ObjectStoreLogIterator}; use crate::manifest::storage::{ManifestObjectStore, ObjectStoreLogIterator};
const CHECKPOINT_ACTIONS_MARGIN: u16 = 10;
-const GC_DURATION_SECS: u64 = 30;
+const GC_DURATION_SECS: u64 = 600;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct ManifestImpl<S: Checkpoint<Error = Error>, M: MetaAction<Error = Error>> { pub struct ManifestImpl<S: Checkpoint<Error = Error>, M: MetaAction<Error = Error>> {

View File

@@ -183,7 +183,8 @@ mod tests {
    use std::sync::Arc;
    use common_test_util::temp_dir::create_temp_dir;
-    use object_store::services::Fs;
+    use object_store::services::{Fs, S3};
+    use object_store::test_util::{s3_test_config, TempFolder};
    use object_store::ObjectStore;
    use store_api::manifest::action::ProtocolAction;
    use store_api::manifest::{Manifest, MetaActionIterator, MAX_VERSION};
@@ -195,17 +196,36 @@
    use crate::sst::FileId;
    #[tokio::test]
-    async fn test_region_manifest_compress() {
-        test_region_manifest(true).await
+    async fn test_fs_region_manifest_compress() {
+        let manifest = new_fs_manifest(true, None).await;
+        test_region_manifest(&manifest).await
    }
    #[tokio::test]
-    async fn test_region_manifest_uncompress() {
-        test_region_manifest(false).await
+    async fn test_fs_region_manifest_uncompress() {
+        let manifest = new_fs_manifest(false, None).await;
+        test_region_manifest(&manifest).await
    }
-    async fn test_region_manifest(compress: bool) {
-        common_telemetry::init_default_ut_logging();
+    #[tokio::test]
+    async fn test_s3_region_manifest_compress() {
+        if s3_test_config().is_some() {
+            let (manifest, temp_dir) = new_s3_manifest(true, None).await;
+            test_region_manifest(&manifest).await;
+            temp_dir.remove_all().await.unwrap();
+        }
+    }
+    #[tokio::test]
+    async fn test_s3_region_manifest_uncompress() {
+        if s3_test_config().is_some() {
+            let (manifest, temp_dir) = new_s3_manifest(false, None).await;
+            test_region_manifest(&manifest).await;
+            temp_dir.remove_all().await.unwrap();
+        }
+    }
+    async fn new_fs_manifest(compress: bool, gc_duration: Option<Duration>) -> RegionManifest {
        let tmp_dir = create_temp_dir("test_region_manifest");
        let mut builder = Fs::default();
        builder.root(&tmp_dir.path().to_string_lossy());
@@ -216,9 +236,43 @@
            object_store,
            manifest_compress_type(compress),
            None,
-            None,
+            gc_duration,
        );
        manifest.start().await.unwrap();
+        manifest
+    }
+    async fn new_s3_manifest(
+        compress: bool,
+        gc_duration: Option<Duration>,
+    ) -> (RegionManifest, TempFolder) {
+        let s3_config = s3_test_config().unwrap();
+        let mut builder = S3::default();
+        builder
+            .root(&s3_config.root)
+            .access_key_id(&s3_config.access_key_id)
+            .secret_access_key(&s3_config.secret_access_key)
+            .bucket(&s3_config.bucket);
+        if s3_config.region.is_some() {
+            builder.region(s3_config.region.as_ref().unwrap());
+        }
+        let store = ObjectStore::new(builder).unwrap().finish();
+        let temp_folder = TempFolder::new(&store, "/");
+        let manifest = RegionManifest::with_checkpointer(
+            "/manifest/",
+            store,
+            manifest_compress_type(compress),
+            None,
+            gc_duration,
+        );
+        manifest.start().await.unwrap();
+        (manifest, temp_folder)
+    }
+    async fn test_region_manifest(manifest: &RegionManifest) {
+        common_telemetry::init_default_ut_logging();
        let region_meta = Arc::new(build_region_meta());
@@ -325,30 +379,48 @@ mod tests {
     }
     #[tokio::test]
-    async fn test_region_manifest_checkpoint_compress() {
-        test_region_manifest_checkpoint(true).await
+    async fn test_fs_region_manifest_checkpoint_compress() {
+        let duration = Duration::from_millis(50);
+        let manifest = new_fs_manifest(true, Some(duration)).await;
+        test_region_manifest_checkpoint(&manifest, duration).await
     }
     #[tokio::test]
-    async fn test_region_manifest_checkpoint_uncompress() {
-        test_region_manifest_checkpoint(false).await
+    async fn test_fs_region_manifest_checkpoint_uncompress() {
+        let duration = Duration::from_millis(50);
+        let manifest = new_fs_manifest(false, Some(duration)).await;
+        test_region_manifest_checkpoint(&manifest, duration).await
     }
-    async fn test_region_manifest_checkpoint(compress: bool) {
-        common_telemetry::init_default_ut_logging();
-        let tmp_dir = create_temp_dir("test_region_manifest_checkpoint");
-        let mut builder = Fs::default();
-        builder.root(&tmp_dir.path().to_string_lossy());
-        let object_store = ObjectStore::new(builder).unwrap().finish();
-        let manifest = RegionManifest::with_checkpointer(
-            "/manifest/",
-            object_store,
-            manifest_compress_type(compress),
-            None,
-            Some(Duration::from_millis(50)),
-        );
-        manifest.start().await.unwrap();
+    #[tokio::test]
+    async fn test_s3_region_manifest_checkpoint_compress() {
+        if s3_test_config().is_some() {
+            let duration = Duration::from_millis(50);
+            let (manifest, temp_dir) = new_s3_manifest(true, Some(duration)).await;
+            test_region_manifest_checkpoint(&manifest, duration).await;
+            temp_dir.remove_all().await.unwrap();
+        }
+    }
+    #[tokio::test]
+    async fn test_s3_region_manifest_checkpoint_uncompress() {
+        if s3_test_config().is_some() {
+            let duration = Duration::from_millis(50);
+            let (manifest, temp_dir) = new_s3_manifest(false, Some(duration)).await;
+            test_region_manifest_checkpoint(&manifest, duration).await;
+            temp_dir.remove_all().await.unwrap();
+        }
+    }
+    async fn test_region_manifest_checkpoint(
+        manifest: &RegionManifest,
+        test_gc_duration: Duration,
+    ) {
+        common_telemetry::init_default_ut_logging();
         let region_meta = Arc::new(build_region_meta());
         let new_region_meta = Arc::new(build_altered_region_meta());
@@ -375,7 +447,7 @@ mod tests {
             manifest.update(action).await.unwrap();
         }
         assert!(manifest.last_checkpoint().await.unwrap().is_none());
-        assert_scan(&manifest, 0, 3).await;
+        assert_scan(manifest, 0, 3).await;
         // update flushed manifest version for doing checkpoint
         manifest.set_flushed_manifest_version(2);
@@ -434,7 +506,7 @@ mod tests {
             manifest.update(action).await.unwrap();
         }
-        assert_scan(&manifest, 3, 2).await;
+        assert_scan(manifest, 3, 2).await;
         // do another checkpoints
         // compacted RegionChange
@@ -458,7 +530,7 @@ mod tests {
             files.contains_key(&file_ids[1]) &&
             *metadata == RawRegionMetadata::from(region_meta.as_ref())));
-        assert_scan(&manifest, 4, 1).await;
+        assert_scan(manifest, 4, 1).await;
         // compacted RegionEdit
         manifest.set_flushed_manifest_version(4);
         let checkpoint = manifest.do_checkpoint().await.unwrap().unwrap();
@@ -492,7 +564,7 @@ mod tests {
         );
         // wait for gc
-        tokio::time::sleep(Duration::from_millis(60)).await;
+        tokio::time::sleep(test_gc_duration * 3).await;
         for v in checkpoint_versions {
             if v < 4 {
View File

@@ -71,5 +71,6 @@ pub fn build_region_edit(
                 file_size: DEFAULT_TEST_FILE_SIZE,
             })
             .collect(),
+        compaction_time_window: None,
     }
 }

View File

@@ -73,7 +73,7 @@ pub trait Memtable: Send + Sync + fmt::Debug {
     fn write(&self, kvs: &KeyValues) -> Result<()>;
     /// Iterates the memtable.
-    fn iter(&self, ctx: &IterContext) -> Result<BoxedBatchIterator>;
+    fn iter(&self, ctx: IterContext) -> Result<BoxedBatchIterator>;
     /// Returns the estimated bytes allocated by this memtable from heap. Result
     /// of this method may be larger than the estimated based on [`num_rows`] because

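The final hunk changes `Memtable::iter` to take `IterContext` by value rather than by reference. A minimal sketch of how a caller adapts, assuming (hypothetically) that `IterContext` implements `Default` and exposes a `batch_size` field, and that the returned iterator yields `Result`-wrapped batches:

    // Before the change the context was borrowed: memtable.iter(&ctx)?
    // After the change it is moved into the iterator: memtable.iter(ctx)?
    let ctx = IterContext {
        batch_size: 1024,
        ..Default::default()
    };
    let iter = memtable.iter(ctx)?;
    for batch in iter {
        let _batch = batch?;
    }
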
Some files were not shown because too many files have changed in this diff.