Compare commits

...

52 Commits

Author SHA1 Message Date
ZonaHe
d2c90b4c59 feat: update dashboard to v0.2.2 (#1426)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2023-04-19 19:09:17 +08:00
Weny Xu
5a05e3107c feat: implement parsing format from hashmap (#1420)
* feat: implement parsing format from hashmap

* chore: apply suggestions from CR
2023-04-19 16:29:31 +08:00
Hao
e4cd08c750 feat: add table id and engine to information_schema.TABLES (#1407)
* feat: add table id and engine to informatin_schema.TABLES

* Update src/catalog/src/information_schema/tables.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: change table_engine to engine

* test: update sqlness for information schema

* test: update information_schema test in frontend::tests::instance_test.rs

* fix: github action sqlness information_schema test fail

* test: ignore table_id in information_schema

* test: support distribute and standalone have different output

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-04-19 10:52:02 +08:00
Ruihang Xia
e8bb00f0be feat: impl instant query interface (#1410)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-04-18 23:25:14 +08:00
LFC
ff2784da0f test: add SELECT ... LIMIT ... test cases for distributed mode (#1419) 2023-04-18 23:05:43 +08:00
liyang
4652b62481 chore: use alicloud imagehub (#1418)
Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2023-04-18 14:35:14 +00:00
Lei, HUANG
0e4d4f0300 chore: release 0.2.0 (#1413)
* chore: bump version to v0.2.0

* chore: bump dashboard to v0.2.1

* chore: remove push uhub step

* fix: static assets path prefix
2023-04-18 22:12:13 +08:00
shuiyisong
145f8eb5a7 refactor: parallelize open table (#1392)
* refactor: change open_table to parallel on datanode startup

* chore: try move out register schema table

* chore: change mito engine to key lock

* chore: minor change

* chore: minor change

* chore: update error definition

* chore: remove rwlock on tables

* chore: try parallel register table on schema provider

* chore: add rt log

* chore: add region open rt log

* chore: add actual open region rt log

* chore: add recover rt log

* chore: divide to three part rt log

* chore: remove debug log

* chore: add replay rt log

* chore: update cargo lock

* chore: remove debug log

* chore: revert unused change

* chore: update err msg

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* chore: fix cr issue

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* chore: fix cr issue

* chore: fix cr issue

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-04-18 21:36:29 +08:00
discord9
de8b889701 chore: update RustPython depend (#1406)
* chore: update RustPython to newer version

* chore: bump ver

* chore: PR advices
2023-04-18 15:39:57 +08:00
Lei, HUANG
1c65987026 chore: remove Release prefix from release name (#1409) 2023-04-18 06:25:08 +00:00
Near
c6f024a171 feat: Add metrics for cache hit/miss for object store cache (#1405)
* Add the cache hit/miss counter

* Verify the cache metrics are included

* Resolve comments

* Rename the error kind label name to be consistent with other metrics

* Rename the object store metric names

* Avoid using glob imports

* Format the code

* chore: Update src/object-store/src/metrics.rs mod doc

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-04-18 14:08:19 +08:00
localhost
0c88bb09e3 chore: add some metrics for grpc client (#1398)
* chore: add some metrics for grpc client

* chore: add grpc preix and change metrics-exporter-ptometheus to add global prefix

---------

Co-authored-by: paomian <qtang@greptime.com>
2023-04-18 13:55:01 +08:00
Ruihang Xia
f4190cfca6 fix: table scan without projection (#1404)
* fix: table scan without projection

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update PR reference

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-04-17 20:10:36 +08:00
zyy17
b933ffddd0 ci: set whether it is the latest release by using 'ncipollo/release-action and update install.sh (#1400)
* ci: set whether it is the latest release by using 'ncipollo/release-action'

* ci: modify greptimedb install script to use the latest nightly version binary
2023-04-17 18:44:00 +08:00
Lei, HUANG
1214b5b43e docs: fix timestamp rendering in readme (#1399)
doc: fix timestamp rendering in readme
2023-04-17 17:07:25 +08:00
Ruihang Xia
a47134a971 chore: don't render reproduce as shell in issue template (#1397)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-04-17 16:42:51 +08:00
Ruihang Xia
dc85a4b5bb feat: migrate substrait to datafusion official implementation (#1238)
* some test cases will fail

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* revert version changes

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update substrait-proto version

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix compile

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update df again

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/common/substrait/Cargo.toml

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* revert COPY FROM / COPY TO sqlness to standalone only

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-04-17 15:54:35 +08:00
Yingwen
0937ccdb61 docs: Add docs about schema structs (#1373)
* docs: Add docs about schema structs

* docs: refine schema struct docs

- Describe SchemaRef and relationship between our schema and arrow's.
- Add more examples

* docs: Add code link to schemas

* docs: Add conversion graph

* docs: Apply suggestions from code review

Co-authored-by: LFC <bayinamine@gmail.com>

---------

Co-authored-by: LFC <bayinamine@gmail.com>
2023-04-17 12:11:24 +08:00
Weny Xu
408de51be8 feat: implement JsonOpener and CsvOpener (#1367)
* feat: introduce JsonOpener and CsvOpener

* refactor: refactor Opener

* docs: add doc
2023-04-17 11:42:16 +08:00
LFC
f7b7a9c801 feat: implement COPY for cluster (#1388) 2023-04-17 11:04:47 +08:00
Weny Xu
cc7c313937 chore: fix clippy (#1387) 2023-04-15 07:00:54 +08:00
Ruihang Xia
a6e41cdd7b chore: bump arrow, parquet, datafusion and tonic (#1386)
* bump arrow, parquet, datafusion, tonic and greptime-proto

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add analyzer and fix test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy warnings

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-04-15 00:03:15 +08:00
Hao
a5771e2ec3 feat: implement predict_linear function in promql (#1362)
* feat: implement predict_linear function in promql

* feat: initialize predict_linear's planner

* fix(bug): fix a bug in linear regression and add some unit test for linear regression

* chore: format code

* feat: deal with NULL value in linear_regression

* feat: add test for all value is None
2023-04-14 22:26:37 +08:00
Lei, HUANG
68e64a6ce9 feat: add some metrics (#1384)
* feat: add some metrics

* fix: compile errors
2023-04-14 20:46:45 +08:00
Ning Sun
90cd3bb5c9 chore: switch mysql_async to git dep (#1383) 2023-04-14 07:04:34 +00:00
shuiyisong
bea37e30d8 chore: query prom using input query context (#1381) 2023-04-14 14:23:36 +08:00
Yingwen
d988b43996 feat: Add drop table procedure to mito (#1377)
* feat: Add drop table procedure to mito

* feat: remove table from engine and then close it
2023-04-14 13:09:38 +08:00
LFC
0fc816fb0c test: add "numbers" table in distributed mode (#1374) 2023-04-14 11:52:04 +08:00
Ning Sun
43391e0162 chore: update pgwire and rustls libraries (#1380)
* feat: update pgwire to 0.13 and fix grafana compatibility

* chore: update pgwire and rustls

* chore: remove unsued imports

* style: format toml
2023-04-14 11:06:01 +08:00
Ruihang Xia
3e7f7e3e8d fix: compile error in develop branch (#1376)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-04-13 15:19:00 +08:00
Yingwen
0819582a26 feat: Add alter table procedure (#1354)
* feat: Implement AlterTableProcedure

* test: Test alter table procedure

* feat: support alter table by procedure in datanode

* chore: update comment
2023-04-13 14:05:53 +08:00
Lei, HUANG
9fa871a3fa fix: concurrent rename two table to same name may cause override (#1368)
* fix: concurrent rename two table to same name may cause override

* fix: concurrently update system catalog table

* fix: correctness
2023-04-13 11:53:02 +08:00
Lei, HUANG
76640402ba fix: update cargo lock (#1375) 2023-04-13 11:08:35 +08:00
discord9
c20dbda598 feat: from/to numpy&collect concat (#1339)
* feat: from/to numpy&collect concat

* feat: PyRecordBatch

* test: try import first,allow w/out numpy/pyarrow

* fix: cond compile flag

* doc: license

* feat: sql() ret PyRecordBatch&repr

* fix: after merge

* style: fmt

* chore: CR advices

* docs: update

* chore: resolve conflict
2023-04-13 10:46:25 +08:00
LFC
33dbf7264f refactor: unify the execution of show stmt (#1340)
* refactor: unify the execution of show stmt
2023-04-12 23:09:07 +08:00
discord9
716bde8f04 feat: benchmark some python script (#1356)
* test: bench rspy&pyo3

* docs: add TODO

* api heavy

* Update src/script/benches/py_benchmark.rs

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

* style: toml fmt

* test: use `rayon` for threadpool

* test: compile first, run later

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2023-04-12 18:19:02 +08:00
ZonaHe
9f2825495d feat: update dashboard to v0.1.0 (#1370)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2023-04-12 17:08:10 +08:00
localhost
ae21c1c1e9 chore: set keep lease heartbeat log level to trace (#1364)
Co-authored-by: paomian <qtang@greptime.com>
2023-04-12 09:38:49 +08:00
Ruihang Xia
6b6617f9cb build: specify clippy denies in cargo config (#1351)
* build: specify clippy denies in cargo config

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* deny implicit clone

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-04-11 09:48:52 +00:00
shuiyisong
d5f0ba4ad9 refactor: merge authenticate and authorize api (#1360)
* chore: add auth api

* chore: update pg using auth api

* chore: update grpc using auth api

* chore: update http using auth api
2023-04-11 17:28:07 +08:00
Eugene Tolbakov
e021da2eee feat(promql): add holt_winters initial implementation (#1342)
* feat(promql): add holt_winters initial implementation

* feat(promql): improve docs for holt_winters

* feat(promql): adjust holt_winters implementation according to code review

* feat(promql): add holt_winters test from prometheus promql function test suite

* feat(promql): add holt_winters more tests from prometheus promql function test suite

* feat(promql): fix styling issue

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-04-11 17:04:35 +08:00
Weny Xu
fac9c17a9b feat: implement infer schema from single file (#1348)
* feat: implement infer schema from file

* feat: implement compression type

* refactor: remove unnecessary BufReader

* refactor: remove SyncIoBridge and using tokio_util::io::SyncIoBridge instead

* chore: apply suggestions from CR
2023-04-11 16:59:30 +08:00
Weny Xu
dfc2a45de1 docs: treat slack as the first-class citizen (#1361) 2023-04-11 16:59:17 +08:00
Lei, HUANG
3e8ec8b73a fix: avoid panic when no region found in table (#1359) 2023-04-11 16:58:18 +08:00
Weny Xu
a90798a2c1 test: add tests for file table engine (#1353)
* test: add tests for file table engine

* test: refactor open table test and add close engine test
2023-04-11 06:25:08 +00:00
Lei, HUANG
f5cf5685cc feat!: parsing local timestamp (#1352)
* fix: parse and display timestamp/datetime in local time zone

* fix display

* fix: unit tests

* change time zone env

* fix: remove useless code
2023-04-11 12:54:15 +08:00
localhost
1a21a6ea41 chore: set metasrv and datanode heartbeat log level to trace (#1357) 2023-04-11 11:21:29 +08:00
Ruihang Xia
09f003d01d fix: lots of corner cases in PromQL (#1345)
* adjust plan ordering
fix offset logic
ignore empty range vector

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: different NaN logic between instant and range selector

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: enlarge selector time window

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* revert change about stale NaN

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* rename variables

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* one more rename

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-04-10 09:05:24 +00:00
Weny Xu
29c6155ae3 feat: introduce file table engine (#1323)
* feat: introduce file table engine

* chore: apply cr suggestions

* refactor: refactor immutable manifest

* chore: apply cr suggestions

* refactor: refactor immutable manifest

* chore: apply suggestions from code review

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* chore: apply suggestions from CR

* chore: apply suggestions from code review

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2023-04-10 12:03:36 +08:00
Weny Xu
804348966d chore: amend fmt-toml (#1347) 2023-04-10 11:42:36 +08:00
Lei, HUANG
b7bdee6de9 feat: ignoring time zone info when import from external files (#1341)
* feat: ignore timezone info when copy from external files

* chore: rebase onto develop
2023-04-10 11:41:34 +08:00
Lei, HUANG
c850e9695a fix: stream inserts when copying from external file (#1338)
* fix: stream inserts when copying from external file

* fix: reset pending bytes once insertion succeeds

* Update src/datanode/src/sql/copy_table_from.rs

Co-authored-by: LFC <bayinamine@gmail.com>

---------

Co-authored-by: LFC <bayinamine@gmail.com>
2023-04-10 10:44:12 +08:00
232 changed files with 9275 additions and 2454 deletions


@@ -3,3 +3,13 @@ linker = "aarch64-linux-gnu-gcc"
[alias]
sqlness = "run --bin sqlness-runner --"
[build]
rustflags = [
# lints
# TODO: use lint configuration in cargo https://github.com/rust-lang/cargo/issues/5034
"-Wclippy::print_stdout",
"-Wclippy::print_stderr",
"-Wclippy::implicit_clone",
]


@@ -81,6 +81,5 @@ body:
Please walk us through and provide steps and details on how
to reproduce the issue. If possible, provide scripts that we
can run to trigger the bug.
render: bash
validations:
required: true


@@ -183,7 +183,7 @@ jobs:
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run cargo clippy
run: cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr
run: cargo clippy --workspace --all-targets -- -D warnings
coverage:
if: github.event.pull_request.draft == false


@@ -319,35 +319,50 @@ jobs:
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-${{ env.SCHEDULED_PERIOD }}-$buildTime
echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV
# Only publish release when the release tag is like v1.0.0, v1.0.1, v1.0.2, etc.
- name: Set whether it is the latest release
run: |
if [[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "prerelease=false" >> $GITHUB_ENV
echo "makeLatest=true" >> $GITHUB_ENV
else
echo "prerelease=true" >> $GITHUB_ENV
echo "makeLatest=false" >> $GITHUB_ENV
fi
- name: Create scheduled build git tag
if: github.event_name == 'schedule'
run: |
git tag ${{ env.SCHEDULED_BUILD_VERSION }}
- name: Publish scheduled release # configure the different release title and tags.
uses: softprops/action-gh-release@v1
uses: ncipollo/release-action@v1
if: github.event_name == 'schedule'
with:
name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
tag_name: ${{ env.SCHEDULED_BUILD_VERSION }}
generate_release_notes: true
files: |
prerelease: ${{ env.prerelease }}
makeLatest: ${{ env.makeLatest }}
tag: ${{ env.SCHEDULED_BUILD_VERSION }}
generateReleaseNotes: true
artifacts: |
**/greptime-*
- name: Publish release
uses: softprops/action-gh-release@v1
uses: ncipollo/release-action@v1
if: github.event_name != 'schedule'
with:
name: "Release ${{ github.ref_name }}"
files: |
name: "${{ github.ref_name }}"
prerelease: ${{ env.prerelease }}
makeLatest: ${{ env.makeLatest }}
generateReleaseNotes: true
artifacts: |
**/greptime-*
docker-push-uhub:
name: Push docker image to UCloud Container Registry
docker-push-acr:
name: Push docker image to alibaba cloud container registry
needs: [docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
# Push to uhub may fail(500 error), but we don't want to block the release process. The failed job will be retried manually.
continue-on-error: true
steps:
- name: Checkout sources
@@ -359,12 +374,12 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to UCloud Container Registry
- name: Login to alibaba cloud container registry
uses: docker/login-action@v2
with:
registry: uhub.service.ucloud.cn
username: ${{ secrets.UCLOUD_USERNAME }}
password: ${{ secrets.UCLOUD_PASSWORD }}
registry: registry.cn-hangzhou.aliyuncs.com
username: ${{ secrets.ALICLOUD_USERNAME }}
password: ${{ secrets.ALICLOUD_PASSWORD }}
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
@@ -381,9 +396,9 @@ jobs:
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
- name: Push image to uhub # Use 'docker buildx imagetools create' to create a new image base on source image.
- name: Push image to alibaba cloud container registry # Use 'docker buildx imagetools create' to create a new image base on source image.
run: |
docker buildx imagetools create \
--tag uhub.service.ucloud.cn/greptime/greptimedb:latest \
--tag uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }} \
--tag registry.cn-hangzhou.aliyuncs.com/greptime/greptimedb:latest \
--tag registry.cn-hangzhou.aliyuncs.com/greptime/greptimedb:${{ env.IMAGE_TAG }} \
greptime/greptimedb:${{ env.IMAGE_TAG }}


@@ -51,7 +51,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr`).
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
#### `pre-commit` Hooks

Cargo.lock (generated, 2018 changed lines): diff suppressed because it is too large.


@@ -24,6 +24,7 @@ members = [
"src/common/time",
"src/datanode",
"src/datatypes",
"src/file-table-engine",
"src/frontend",
"src/log-store",
"src/meta-client",
@@ -46,39 +47,42 @@ members = [
]
[workspace.package]
version = "0.1.1"
version = "0.2.0"
edition = "2021"
license = "Apache-2.0"
[workspace.dependencies]
arrow = { version = "36.0" }
arrow-array = "36.0"
arrow-flight = "36.0"
arrow-schema = { version = "36.0", features = ["serde"] }
arrow = { version = "37.0" }
arrow-array = "37.0"
arrow-flight = "37.0"
arrow-schema = { version = "37.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "21bf4ffccadfeea824ab6e29c0b872930d0e190a" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "21bf4ffccadfeea824ab6e29c0b872930d0e190a" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "21bf4ffccadfeea824ab6e29c0b872930d0e190a" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "21bf4ffccadfeea824ab6e29c0b872930d0e190a" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "21bf4ffccadfeea824ab6e29c0b872930d0e190a" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "21bf4ffccadfeea824ab6e29c0b872930d0e190a" }
# TODO(ruihang): use arrow-datafusion when it contains https://github.com/apache/arrow-datafusion/pull/6032
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
futures = "0.3"
futures-util = "0.3"
parquet = "36.0"
parquet = "37.0"
paste = "1.0"
prost = "0.11"
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = "0.32"
sqlparser = "0.33"
tempfile = "3"
tokio = { version = "1.24.2", features = ["full"] }
tokio-util = "0.7"
tonic = { version = "0.8", features = ["tls"] }
tokio-util = { version = "0.7", features = ["io-util"] }
tonic = { version = "0.9", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
metrics = "0.20"
[profile.release]
debug = true


@@ -21,6 +21,10 @@ fmt: ## Format all the Rust code.
.PHONY: fmt-toml
fmt-toml: ## Format all TOML files.
taplo format --option "indent_string= "
.PHONY: check-toml
check-toml: ## Check all TOML files.
taplo format --check --option "indent_string= "
.PHONY: docker-image
@@ -47,7 +51,7 @@ check: ## Cargo check all the targets.
.PHONY: clippy
clippy: ## Check clippy rules.
cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr
cargo clippy --workspace --all-targets -- -D warnings
.PHONY: fmt-check
fmt-check: ## Check code format.


@@ -23,6 +23,8 @@
<a href="https://twitter.com/greptime"><img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg"></a>
&nbsp;
<a href="https://www.linkedin.com/company/greptime/"><img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg"></a>
&nbsp;
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
</p>
## What is GreptimeDB
@@ -129,16 +131,16 @@ about Kubernetes deployment, check our [docs](https://docs.greptime.com/).
SELECT * FROM monitor;
```
```TEXT
+-------+---------------------+------+--------+
| host | ts | cpu | memory |
+-------+---------------------+------+--------+
| host1 | 2022-08-19 08:32:35 | 66.6 | 1024 |
| host2 | 2022-08-19 08:32:36 | 77.7 | 2048 |
| host3 | 2022-08-19 08:32:37 | 88.8 | 4096 |
+-------+---------------------+------+--------+
3 rows in set (0.01 sec)
```
```TEXT
+-------+--------------------------+------+--------+
| host | ts | cpu | memory |
+-------+--------------------------+------+--------+
| host1 | 2022-08-19 16:32:35+0800 | 66.6 | 1024 |
| host2 | 2022-08-19 16:32:36+0800 | 77.7 | 2048 |
| host3 | 2022-08-19 16:32:37+0800 | 88.8 | 4096 |
+-------+--------------------------+------+--------+
3 rows in set (0.03 sec)
```
You can always cleanup test database by removing `/tmp/greptimedb`.


@@ -128,10 +128,10 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
let (values, datatype) = build_values(array);
let column = Column {
column_name: field.name().to_owned(),
column_name: field.name().clone(),
values: Some(values),
null_mask: array
.data()
.to_data()
.nulls()
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
.unwrap_or_default(),
@@ -225,7 +225,7 @@ fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
| DataType::FixedSizeList(_, _)
| DataType::LargeList(_)
| DataType::Struct(_)
| DataType::Union(_, _, _)
| DataType::Union(_, _)
| DataType::Dictionary(_, _)
| DataType::Decimal128(_, _)
| DataType::Decimal256(_, _)


@@ -0,0 +1,74 @@
This document introduces how to implement SQL statements in GreptimeDB.
The execution entry point for SQL statements is located in the Frontend `Instance`. You can see it implements `SqlQueryHandler`:
```rust
impl SqlQueryHandler for Instance {
type Error = Error;
async fn do_query(&self, query: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
// ...
}
}
```
Normally, when a SQL query arrives at GreptimeDB, the `do_query` method is called. After some parsing work, the SQL is fed into `StatementExecutor`:
```rust
// in Frontend Instance:
self.statement_executor.execute_sql(stmt, query_ctx).await
```
That's where we handle our SQL statements. You can simply create a new match arm for your statement there, and the statement is then implemented for both GreptimeDB Standalone and Cluster. See how `DESCRIBE TABLE` is implemented as an example, and the sketch below.
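For illustration only, here is a minimal, self-contained sketch of that pattern; the types below are simplified stand-ins, not the real GreptimeDB `Statement`, `Output`, or `SqlStatementExecutor` definitions:

```rust
// Stand-in types: the real definitions live in GreptimeDB's own crates.
#[allow(dead_code)]
enum Statement {
    DescribeTable(String),
    CreateTable(String),
}

struct StatementExecutor;

impl StatementExecutor {
    fn execute_sql(&self, stmt: Statement) -> String {
        match stmt {
            // A commonly handled statement: one match arm here serves both
            // GreptimeDB Standalone and Cluster.
            Statement::DescribeTable(table) => format!("DESCRIBE TABLE {table}"),
            // A mode-specific statement: delegate to the SqlStatementExecutor
            // implementation of the current mode (Datanode Instance or
            // Frontend DistInstance).
            Statement::CreateTable(table) => {
                format!("delegate CREATE TABLE {table} to SqlStatementExecutor")
            }
        }
    }
}

fn main() {
    let executor = StatementExecutor;
    println!("{}", executor.execute_sql(Statement::DescribeTable("cpu".into())));
}
```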
Now, what if a statement should be handled differently for GreptimeDB Standalone and Cluster? You can see there's a `SqlStatementExecutor` field in `StatementExecutor`. GreptimeDB Standalone and Cluster each have their own implementation of `SqlStatementExecutor`. If you are going to implement a statement differently in the two modes (like `CREATE TABLE`), you have to implement it in both `SqlStatementExecutor`s.
The flow is summarized in the diagram below:
```text
SQL query
|
v
+---------------------------+
| SqlQueryHandler::do_query |
+---------------------------+
|
| SQL parsing
v
+--------------------------------+
| StatementExecutor::execute_sql |
+--------------------------------+
|
| SQL execution
v
+----------------------------------+
| commonly handled statements like |
| "plan_exec" for selection or |
+----------------------------------+
| |
For Standalone | | For Cluster
v v
+---------------------------+ +---------------------------+
| SqlStatementExecutor impl | | SqlStatementExecutor impl |
| in Datanode Instance | | in Frontend DistInstance |
+---------------------------+ +---------------------------+
```
Note that some SQL statements can be executed in our QueryEngine in the form of a `LogicalPlan`. You can follow the invocation path down to the `QueryEngine` implementation from `StatementExecutor::plan_exec`. For now, there's only one `DatafusionQueryEngine` for both GreptimeDB Standalone and Cluster. That lone query engine works for both modes because GreptimeDB reads and writes data through the `Table` trait, and each mode has its own `Table` implementation.
We don't have any bias towards handling statements in the query engine or in `StatementExecutor`; you can implement a kind of statement in either place. For example, `Insert` with selection is handled in the query engine, because we can easily do the query part there. However, `Insert` without selection is not, because the cost of parsing the statement into a `LogicalPlan` is not negligible. Generally, if the SQL query is simple enough, you can handle it in `StatementExecutor`; if it is complex or involves selection, it should be parsed into a `LogicalPlan` and handled in the query engine.

docs/schema-structs.md (new file, 527 lines)

@@ -0,0 +1,527 @@
# Schema Structs
# Common Schemas
The `datatypes` crate defines the elementary schema structs used to describe metadata.
## ColumnSchema
[ColumnSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/column_schema.rs#L36) represents the metadata of a column. It is equivalent to arrow's [Field](https://docs.rs/arrow/latest/arrow/datatypes/struct.Field.html) with additional metadata, such as the default constraint and whether the column is a time index. The time index is the column declared in a table's `TIME INDEX` constraint. We can convert a `ColumnSchema` into an arrow `Field` and convert the `Field` back to a `ColumnSchema` without losing metadata.
```rust
pub struct ColumnSchema {
pub name: String,
pub data_type: ConcreteDataType,
is_nullable: bool,
is_time_index: bool,
default_constraint: Option<ColumnDefaultConstraint>,
metadata: Metadata,
}
```
## Schema
[Schema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema.rs#L38) is an ordered sequence of `ColumnSchema`. It is equivalent to arrow's [Schema](https://docs.rs/arrow/latest/arrow/datatypes/struct.Schema.html) with additional metadata including the index of the time index column and the version of this schema. Same as `ColumnSchema`, we can convert our `Schema` from/to arrow's `Schema`.
```rust
use arrow::datatypes::Schema as ArrowSchema;
pub struct Schema {
column_schemas: Vec<ColumnSchema>,
name_to_index: HashMap<String, usize>,
arrow_schema: Arc<ArrowSchema>,
timestamp_index: Option<usize>,
version: u32,
}
pub type SchemaRef = Arc<Schema>;
```
We alias `Arc<Schema>` as `SchemaRef` since it is used frequently. Mostly, we use our `ColumnSchema` and `Schema` structs instead of Arrow's `Field` and `Schema` unless we need to invoke third-party libraries (like DataFusion or ArrowFlight) that rely on Arrow.
## RawSchema
`Schema` contains fields like a map from column names to their indices in the `ColumnSchema` sequence and a cached arrow `Schema`. Since we can reconstruct these fields from the `ColumnSchema` sequence, we don't want to serialize them. This is why we don't derive `Serialize` and `Deserialize` for `Schema`. Instead, we introduce a new struct [RawSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/raw.rs#L24) which keeps all required fields of a `Schema` and derives the serialization traits. To serialize a `Schema`, we convert it into a `RawSchema` first and serialize the `RawSchema`.
```rust
pub struct RawSchema {
pub column_schemas: Vec<ColumnSchema>,
pub timestamp_index: Option<usize>,
pub version: u32,
}
```
We want to keep the `Schema` simple and avoid putting too much business-related metadata in it as many different structs or traits rely on it.
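As an aside, here is a minimal sketch of this serialize-via-raw-struct pattern with heavily simplified stand-in types (the real `Schema` also caches an arrow schema, the real conversion from `RawSchema` back to `Schema` is fallible, i.e. `TryFrom`, as the conversion graph at the end of this document shows, and `serde`/`serde_json` are assumed as dependencies):

```rust
use std::collections::HashMap;

use serde::{Deserialize, Serialize};

// Stand-in for the real ColumnSchema; only the name matters for this sketch.
#[derive(Clone, Serialize, Deserialize)]
struct ColumnSchema {
    name: String,
}

// Schema carries derived fields (the name-to-index map, and in the real code
// a cached arrow schema) that we don't want to serialize.
#[allow(dead_code)]
struct Schema {
    column_schemas: Vec<ColumnSchema>,
    name_to_index: HashMap<String, usize>,
    timestamp_index: Option<usize>,
    version: u32,
}

// RawSchema keeps only what is needed to rebuild a Schema and derives the
// serialization traits.
#[derive(Serialize, Deserialize)]
struct RawSchema {
    column_schemas: Vec<ColumnSchema>,
    timestamp_index: Option<usize>,
    version: u32,
}

impl From<&Schema> for RawSchema {
    fn from(schema: &Schema) -> Self {
        RawSchema {
            column_schemas: schema.column_schemas.clone(),
            timestamp_index: schema.timestamp_index,
            version: schema.version,
        }
    }
}

impl From<RawSchema> for Schema {
    // The real conversion validates the raw fields; this sketch skips that.
    fn from(raw: RawSchema) -> Self {
        let name_to_index = raw
            .column_schemas
            .iter()
            .enumerate()
            .map(|(i, c)| (c.name.clone(), i))
            .collect();
        Schema {
            column_schemas: raw.column_schemas,
            name_to_index,
            timestamp_index: raw.timestamp_index,
            version: raw.version,
        }
    }
}

fn main() {
    let schema = Schema::from(RawSchema {
        column_schemas: vec![ColumnSchema { name: "ts".to_string() }],
        timestamp_index: Some(0),
        version: 0,
    });
    // To persist a Schema, convert it into a RawSchema first.
    println!("{}", serde_json::to_string(&RawSchema::from(&schema)).unwrap());
}
```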
# Schema of the Table
A table maintains its schema in [TableMeta](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/table/src/metadata.rs#L97).
```rust
pub struct TableMeta {
pub schema: SchemaRef,
pub primary_key_indices: Vec<usize>,
pub value_indices: Vec<usize>,
// ...
}
```
The order of columns in `TableMeta::schema` is the same as the order specified in the `CREATE TABLE` statement that was used to create the table.
The field `primary_key_indices` stores the indices of primary key columns. The field `value_indices` records the indices of value columns (columns that are neither primary key nor time index; we sometimes call them field columns).
Suppose we create a table with the following SQL:
```sql
CREATE TABLE cpu (
ts TIMESTAMP,
host STRING,
usage_user DOUBLE,
usage_system DOUBLE,
datacenter STRING,
TIME INDEX (ts),
PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
```
Then the table's `TableMeta` may look like this:
```json
{
"schema":{
"column_schemas":[
"ts",
"host",
"usage_user",
"usage_system",
"datacenter"
],
"time_index":0,
"version":0
},
"primary_key_indices":[
4,
1
],
"value_indices":[
2,
3
]
}
```
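For illustration (this helper is hypothetical, not part of the codebase), `value_indices` in the example above can be derived from the number of columns, the primary key indices, and the time index:

```rust
// Hypothetical helper: value columns are the columns that are neither part of
// the primary key nor the time index.
fn value_indices(num_columns: usize, primary_key_indices: &[usize], time_index: usize) -> Vec<usize> {
    (0..num_columns)
        .filter(|i| *i != time_index && !primary_key_indices.contains(i))
        .collect()
}

fn main() {
    // The `cpu` table above has 5 columns, primary key indices [4, 1] and
    // time index 0, which gives value indices [2, 3] as in the TableMeta example.
    assert_eq!(value_indices(5, &[4, 1], 0), vec![2, 3]);
}
```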
# Schemas of the storage engine
We split a table into one or more units with the same schema and then store these units in the storage engine. Each unit is a region in the storage engine.
The storage engine maintains schemas of regions in more complicated ways because it
- adds internal columns that are invisible to users to store additional metadata for each row
- provides a data model similar to the key-value model so it organizes columns in a different order
- maintains additional metadata like column id or column family
So the storage engine defines several schema structs:
- RegionSchema
- StoreSchema
- ProjectedSchema
## RegionSchema
A [RegionSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/region.rs#L37) describes the schema of a region.
```rust
pub struct RegionSchema {
user_schema: SchemaRef,
store_schema: StoreSchemaRef,
columns: ColumnsMetadataRef,
}
```
Each region reserves some columns called `internal columns` for internal usage:
- `__sequence`, sequence number of a row
- `__op_type`, operation type of a row, such as `PUT` or `DELETE`
- `__version`, user-specified version of a row, reserved but not used. We might remove this in the future
The table engine can't see the `__sequence` and `__op_type` columns, so the `RegionSchema` itself maintains two internal schemas:
- User schema, a `Schema` struct that doesn't have internal columns
- Store schema, a `StoreSchema` struct that has internal columns
The `ColumnsMetadata` struct keeps metadata about all columns, but most of the time we only need the metadata in the user schema and store schema, so we just ignore it. We may remove this struct in the future.
`RegionSchema` organizes columns in the following order:
```
key columns, timestamp, [__version,] value columns, __sequence, __op_type
```
We can ignore the `__version` column because it is disabled now:
```
key columns, timestamp, value columns, __sequence, __op_type
```
Key columns are the columns of a table's primary key. The timestamp is the time index column. A region sorts all rows by key columns, timestamp, sequence, and op type.
So the `RegionSchema` of our `cpu` table above looks like this:
```json
{
"user_schema":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system"
],
"store_schema":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system",
"__sequence",
"__op_type"
]
}
```
## StoreSchema
As described above, a [StoreSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/store.rs#L36) is a schema that knows all internal columns.
```rust
struct StoreSchema {
columns: Vec<ColumnMetadata>,
schema: SchemaRef,
row_key_end: usize,
user_column_end: usize,
}
```
The columns in the `columns` and `schema` fields have the same order. The `ColumnMetadata` has metadata like column id, column family id, and comment. The `StoreSchema` also stores this metadata in `StoreSchema::schema`, so we can convert between the `StoreSchema` and arrow's `Schema`. We use this feature to persist the `StoreSchema` in SSTs, since our SST format is Parquet, which can take arrow's `Schema` as its schema.
The `StoreSchema` of the region above is similar to this:
```json
{
"schema":{
"column_schemas":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system",
"__sequence",
"__op_type"
],
"time_index":2,
"version":0
},
"row_key_end":3,
"user_column_end":5
}
```
The key and timestamp columns form the row key of a row. We put them together so we can use `row_key_end` to get the indices of all row key columns. Similarly, we can use `user_column_end` to get the indices of all user columns (non-internal columns).
```rust
impl StoreSchema {
#[inline]
pub(crate) fn row_key_indices(&self) -> impl Iterator<Item = usize> {
0..self.row_key_end
}
#[inline]
pub(crate) fn value_indices(&self) -> impl Iterator<Item = usize> {
self.row_key_end..self.user_column_end
}
}
```
Another useful feature of `StoreSchema` is that we ensure it always contains key columns, a timestamp column, and internal columns, because we need them to perform merging, deduplication, and deletion. Projection on a `StoreSchema` only projects value columns.
## ProjectedSchema
To support arbitrary projection, we introduce the [ProjectedSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/projected.rs#L106).
```rust
pub struct ProjectedSchema {
projection: Option<Projection>,
schema_to_read: StoreSchemaRef,
projected_user_schema: SchemaRef,
}
```
We need to handle many cases while doing projection:
- The column order of the table and of the region is different
- The projection can be in arbitrary order, e.g. `select usage_user, host from cpu` and `select host, usage_user from cpu` have different projection orders
- We support `ALTER TABLE`, so data files may have different schemas
### Projection
Let's take an example to see how projection works. Suppose we want to select `ts`, `usage_system` from the `cpu` table.
```sql
CREATE TABLE cpu (
ts TIMESTAMP,
host STRING,
usage_user DOUBLE,
usage_system DOUBLE,
datacenter STRING,
TIME INDEX (ts),
PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
select ts, usage_system from cpu;
```
The query engine uses the projection `[0, 3]` to scan the table. However, columns in the region have a different order, so the table engine adjusts the projection to `[2, 4]`.
```json
{
"user_schema":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system"
],
}
```
As you can see, the output order is still `[ts, usage_system]`. This is the schema users see after projection, so we call it the `projected user schema`.
But the storage engine also needs to read key columns, the timestamp column, and the internal columns, so the `ProjectedSchema` also maintains a projected `StoreSchema`.
The `Projection` struct is a helper that computes the projected user schema and the projected store schema.
So we can construct the following `ProjectedSchema`:
```json
{
"schema_to_read":{
"schema":{
"column_schemas":[
"datacenter",
"host",
"ts",
"usage_system",
"__sequence",
"__op_type"
],
"time_index":2,
"version":0
},
"row_key_end":3,
"user_column_end":4
},
"projected_user_schema":{
"column_schemas":[
"ts",
"usage_system"
],
"time_index":0
}
}
```
As you can see, `schema_to_read` doesn't contain the column `usage_user`, which is not intended to be read (it is not in the projection).
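As a rough, self-contained illustration of the index adjustment above (the function here is made up for this sketch; the real code uses the `Projection` helper), a projection given in table column order can be remapped to the region's column order by looking up column names:

```rust
// Remap projection indices from table column order to region column order.
fn remap_projection(
    table_columns: &[&str],
    region_columns: &[&str],
    projection: &[usize],
) -> Vec<usize> {
    projection
        .iter()
        .map(|&i| {
            let name = table_columns[i];
            region_columns
                .iter()
                .position(|c| *c == name)
                .expect("projected column must exist in the region")
        })
        .collect()
}

fn main() {
    let table = ["ts", "host", "usage_user", "usage_system", "datacenter"];
    let region = ["datacenter", "host", "ts", "usage_user", "usage_system"];
    // `select ts, usage_system from cpu` is projection [0, 3] on the table,
    // which becomes [2, 4] in the region's column order.
    assert_eq!(remap_projection(&table, &region, &[0, 3]), vec![2, 4]);
}
```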
### ReadAdapter
As mentioned above, we can alter a table, so the underlying files (SSTs) and memtables in the storage engine may have different schemas.
To simplify the logic of `ProjectedSchema`, we handle the difference between schemas before projection (i.e., before constructing the `ProjectedSchema`). We introduce a [ReadAdapter](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/compat.rs#L90) that adapts rows with different source schemas to the same expected schema.
So we can always use the region's current `RegionSchema` to construct the `ProjectedSchema`, and then create a `ReadAdapter` for each memtable or SST.
```rust
#[derive(Debug)]
pub struct ReadAdapter {
source_schema: StoreSchemaRef,
dest_schema: ProjectedSchemaRef,
indices_in_result: Vec<Option<usize>>,
is_source_needed: Vec<bool>,
}
```
For each column required by `dest_schema`, `indices_in_result` stores the index of that column in the row read from the source memtable or SST. If the source row doesn't contain that column, the index is `None`.
The field `is_source_needed` stores whether a column in the source memtable or SST is needed.
Suppose we add a new column `usage_idle` to the table `cpu`.
```sql
ALTER TABLE cpu ADD COLUMN usage_idle DOUBLE;
```
The new `StoreSchema` becomes:
```json
{
"schema":{
"column_schemas":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system",
"usage_idle",
"__sequence",
"__op_type"
],
"time_index":2,
"version":1
},
"row_key_end":3,
"user_column_end":6
}
```
Note that we bump the version of the schema to 1.
Suppose we want to select `ts`, `usage_system`, and `usage_idle`. While reading data written with the old schema, the storage engine creates a `ReadAdapter` like this:
```json
{
"source_schema":{
"schema":{
"column_schemas":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system",
"__sequence",
"__op_type"
],
"time_index":2,
"version":0
},
"row_key_end":3,
"user_column_end":5
},
"dest_schema":{
"schema_to_read":{
"schema":{
"column_schemas":[
"datacenter",
"host",
"ts",
"usage_system",
"usage_idle",
"__sequence",
"__op_type"
],
"time_index":2,
"version":1
},
"row_key_end":3,
"user_column_end":5
},
"projected_user_schema":{
"column_schemas":[
"ts",
"usage_system",
"usage_idle"
],
"time_index":0
}
},
"indices_in_result":[
0,
1,
2,
3,
null,
4,
5
],
"is_source_needed":[
true,
true,
true,
false,
true,
true,
true
]
}
```
We don't need to read `usage_user`, so `is_source_needed[3]` is false. The old schema doesn't have the column `usage_idle`, so `indices_in_result[4]` is `null`, and the `ReadAdapter` needs to insert a null column into the output row so that the output still contains `usage_idle`.
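A tiny, self-contained sketch (simplified names and types, not the actual `ReadAdapter` code, and operating on a single row of scalar values instead of columns) of the adaptation step: the source row holds only the columns that were actually read (`is_source_needed == true`), and `indices_in_result` maps each destination column either to a position in that row or to `None`, which becomes a null:

```rust
// Build a row in the destination schema from a row read with an older
// source schema.
fn adapt_row(source_row: &[Option<f64>], indices_in_result: &[Option<usize>]) -> Vec<Option<f64>> {
    indices_in_result
        .iter()
        .map(|idx| match idx {
            Some(i) => source_row[*i],
            // Column missing in the old schema (e.g. `usage_idle`): fill with null.
            None => None,
        })
        .collect()
}

fn main() {
    // Row read with the old schema, `usage_user` already skipped:
    // [datacenter, host, ts, usage_system, __sequence, __op_type]
    let source_row = vec![Some(1.0), Some(2.0), Some(3.0), Some(4.0), Some(5.0), Some(6.0)];
    let indices_in_result = vec![Some(0), Some(1), Some(2), Some(3), None, Some(4), Some(5)];
    let dest_row = adapt_row(&source_row, &indices_in_result);
    assert_eq!(dest_row[4], None); // `usage_idle` is null in the output
}
```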
The figure below shows the relationship between `RegionSchema`, `StoreSchema`, `ProjectedSchema`, and `ReadAdapter`.
```text
┌──────────────────────────────┐
│ │
│ ┌────────────────────┐ │
│ │ store_schema │ │
│ │ │ │
│ │ StoreSchema │ │
│ │ version 1 │ │
│ └────────────────────┘ │
│ │
│ ┌────────────────────┐ │
│ │ user_schema │ │
│ └────────────────────┘ │
│ │
│ RegionSchema │
│ │
└──────────────┬───────────────┘
┌──────────────▼───────────────┐
│ │
│ ┌──────────────────────────┐ │
│ │ schema_to_read │ │
│ │ │ │
│ │ StoreSchema (projected) │ │
│ │ version 1 │ │
│ └──────────────────────────┘ │
┌───┤ ├───┐
│ │ ┌──────────────────────────┐ │ │
│ │ │ projected_user_schema │ │ │
│ │ └──────────────────────────┘ │ │
│ │ │ │
│ │ ProjectedSchema │ │
dest schema │ └──────────────────────────────┘ │ dest schema
│ │
│ │
┌──────▼───────┐ ┌───────▼──────┐
│ │ │ │
│ ReadAdapter │ │ ReadAdapter │
│ │ │ │
└──────▲───────┘ └───────▲──────┘
│ │
│ │
source schema │ │ source schema
│ │
┌───────┴─────────┐ ┌────────┴────────┐
│ │ │ │
│ ┌─────────────┐ │ │ ┌─────────────┐ │
│ │ │ │ │ │ │ │
│ │ StoreSchema │ │ │ │ StoreSchema │ │
│ │ │ │ │ │ │ │
│ │ version 0 │ │ │ │ version 1 │ │
│ │ │ │ │ │ │ │
│ └─────────────┘ │ │ └─────────────┘ │
│ │ │ │
│ SST 0 │ │ SST 1 │
│ │ │ │
└─────────────────┘ └─────────────────┘
```
# Conversion
This figure shows the conversion between schemas:
```text
┌─────────────┐ schema From ┌─────────────┐
│ ├──────────────────┐ ┌────────────────────────────► │
│ TableMeta │ │ │ │ RawSchema │
│ │ │ │ ┌─────────────────────────┤ │
└─────────────┘ │ │ │ TryFrom └─────────────┘
│ │ │
│ │ │
│ │ │
│ │ │
│ │ │
┌───────────────────┐ ┌─────▼──┴──▼──┐ arrow_schema() ┌─────────────────┐
│ │ │ ├─────────────────────► │
│ ColumnsMetadata │ ┌─────► Schema │ │ ArrowSchema ├──┐
│ │ │ │ ◄─────────────────────┤ │ │
└────┬───────────▲──┘ │ └───▲───▲──────┘ TryFrom └─────────────────┘ │
│ │ │ │ │ │
│ │ │ │ └────────────────────────────────────────┐ │
│ │ │ │ │ │
│ columns │ user_schema() │ │ │
│ │ │ │ projected_user_schema() schema() │
│ │ │ │ │ │
│ ┌───┴─────────────┴─┐ │ ┌────────────────────┐ │ │
columns │ │ │ └─────────────────┤ │ │ │ TryFrom
│ │ RegionSchema │ │ ProjectedSchema │ │ │
│ │ ├─────────────────────────► │ │ │
│ └─────────────────┬─┘ ProjectedSchema::new() └──────────────────┬─┘ │ │
│ │ │ │ │
│ │ │ │ │
│ │ │ │ │
│ │ │ │ │
┌────▼────────────────────┐ │ store_schema() ┌────▼───────┴──┐ │
│ │ └─────────────────────────────────────────► │ │
│ Vec<ColumnMetadata> │ │ StoreSchema ◄─────┘
│ ◄──────────────────────────────────────────────┤ │
└─────────────────────────┘ columns └───────────────┘
```


@@ -51,13 +51,17 @@ get_os_type
get_arch_type
if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
echo "Downloading ${BIN}, OS: ${OS_TYPE}, Arch: ${ARCH_TYPE}, Version: ${VERSION}"
# Use the latest nightly version.
if [ "${VERSION}" = "latest" ]; then
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/latest/download/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
else
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
VERSION=$(curl -s -XGET "https://api.github.com/repos/${GITHUB_ORG}/${GITHUB_REPO}/releases" | grep tag_name | grep nightly | cut -d: -f 2 | sed 's/.*"\(.*\)".*/\1/' | uniq | sort -r | head -n 1)
if [ -z "${VERSION}" ]; then
echo "Failed to get the latest version."
exit 1
fi
fi
echo "Downloading ${BIN}, OS: ${OS_TYPE}, Arch: ${ARCH_TYPE}, Version: ${VERSION}"
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run './${BIN} --help' to get started"
fi


@@ -10,10 +10,10 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "59afacdae59eae4241cfaf851021361caaeaed21" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0bebe5f69c91cdfbce85cb8f45f9fcd28185261c" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true
[build-dependencies]
tonic-build = "0.8"
tonic-build = "0.9"


@@ -24,6 +24,7 @@ datafusion.workspace = true
datatypes = { path = "../datatypes" }
futures = "0.3"
futures-util.workspace = true
key-lock = "0.1"
lazy_static = "1.4"
meta-client = { path = "../meta-client" }
parking_lot = "0.12"


@@ -109,10 +109,16 @@ impl CatalogProvider for CatalogProviderAdapter {
}
///Greptime CatalogProvider -> datafusion's CatalogProvider
struct DfCatalogProviderAdapter {
pub struct DfCatalogProviderAdapter {
catalog_provider: CatalogProviderRef,
}
impl DfCatalogProviderAdapter {
pub fn new(catalog_provider: CatalogProviderRef) -> Self {
Self { catalog_provider }
}
}
impl DfCatalogProvider for DfCatalogProviderAdapter {
fn as_any(&self) -> &dyn Any {
self


@@ -20,6 +20,7 @@ use common_error::prelude::{Snafu, StatusCode};
use datafusion::error::DataFusionError;
use datatypes::prelude::ConcreteDataType;
use snafu::Location;
use tokio::task::JoinError;
use crate::DeregisterTableRequest;
@@ -127,6 +128,9 @@ pub enum Error {
source: table::error::Error,
},
#[snafu(display("Failed to open table in parallel, source: {}", source))]
ParallelOpenTable { source: JoinError },
#[snafu(display("Table not found while opening table, table info: {}", table_info))]
TableNotFound {
table_info: String,
@@ -261,7 +265,8 @@ impl ErrorExt for Error {
| Error::IllegalManagerState { .. }
| Error::CatalogNotFound { .. }
| Error::InvalidEntryType { .. }
| Error::InvalidSystemTableDef { .. } => StatusCode::Unexpected,
| Error::InvalidSystemTableDef { .. }
| Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
Error::SystemCatalog { .. }
| Error::EmptyValue { .. }


@@ -23,7 +23,7 @@ use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatch
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::StringVectorBuilder;
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder};
use snafu::ResultExt;
use table::metadata::TableType;
@@ -44,6 +44,8 @@ impl InformationSchemaTables {
ColumnSchema::new("table_schema", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_name", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_type", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_id", ConcreteDataType::uint32_datatype(), true),
ColumnSchema::new("engine", ConcreteDataType::string_datatype(), true),
]));
Self {
schema,
@@ -73,6 +75,8 @@ struct InformationSchemaTablesBuilder {
schema_names: StringVectorBuilder,
table_names: StringVectorBuilder,
table_types: StringVectorBuilder,
table_ids: UInt32VectorBuilder,
engines: StringVectorBuilder,
}
impl InformationSchemaTablesBuilder {
@@ -85,6 +89,8 @@ impl InformationSchemaTablesBuilder {
schema_names: StringVectorBuilder::with_capacity(42),
table_names: StringVectorBuilder::with_capacity(42),
table_types: StringVectorBuilder::with_capacity(42),
table_ids: UInt32VectorBuilder::with_capacity(42),
engines: StringVectorBuilder::with_capacity(42),
}
}
@@ -100,7 +106,15 @@ impl InformationSchemaTablesBuilder {
let Some(schema) = self.catalog_provider.schema(&schema_name)? else { continue };
for table_name in schema.table_names()? {
let Some(table) = schema.table(&table_name).await? else { continue };
self.add_table(&catalog_name, &schema_name, &table_name, table.table_type());
let table_info = table.table_info();
self.add_table(
&catalog_name,
&schema_name,
&table_name,
table.table_type(),
Some(table_info.ident.table_id),
Some(&table_info.meta.engine),
);
}
}
@@ -110,6 +124,8 @@ impl InformationSchemaTablesBuilder {
INFORMATION_SCHEMA_NAME,
TABLES,
TableType::View,
None,
None,
);
self.finish()
@@ -121,6 +137,8 @@ impl InformationSchemaTablesBuilder {
schema_name: &str,
table_name: &str,
table_type: TableType,
table_id: Option<u32>,
engine: Option<&str>,
) {
self.catalog_names.push(Some(catalog_name));
self.schema_names.push(Some(schema_name));
@@ -130,6 +148,8 @@ impl InformationSchemaTablesBuilder {
TableType::View => "VIEW",
TableType::Temporary => "LOCAL TEMPORARY",
}));
self.table_ids.push(table_id);
self.engines.push(engine);
}
fn finish(&mut self) -> Result<RecordBatch> {
@@ -138,6 +158,8 @@ impl InformationSchemaTablesBuilder {
Arc::new(self.schema_names.finish()),
Arc::new(self.table_names.finish()),
Arc::new(self.table_types.finish()),
Arc::new(self.table_ids.finish()),
Arc::new(self.engines.finish()),
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}


@@ -419,6 +419,13 @@ impl CatalogManager for LocalCatalogManager {
schema: schema_name,
})?;
let _lock = self.register_lock.lock().await;
ensure!(
!schema.table_exist(&request.new_table_name)?,
TableExistsSnafu {
table: &request.new_table_name
}
);
let old_table = schema
.table(&request.table_name)
.await?
@@ -437,9 +444,11 @@ impl CatalogManager for LocalCatalogManager {
engine,
)
.await?;
Ok(schema
.rename_table(&request.table_name, request.new_table_name)
.is_ok())
let renamed = schema
.rename_table(&request.table_name, request.new_table_name.clone())
.is_ok();
Ok(renamed)
}
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {


@@ -324,16 +324,20 @@ impl SchemaProvider for MemorySchemaProvider {
fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
let mut tables = self.tables.write().unwrap();
if tables.get(name).is_some() {
let table = tables.remove(name).unwrap();
tables.insert(new_name, table.clone());
Ok(table)
} else {
TableNotFoundSnafu {
let Some(table) = tables.remove(name) else {
return TableNotFoundSnafu {
table_info: name.to_string(),
}
.fail()?
}
.fail()?;
};
let e = match tables.entry(new_name) {
Entry::Vacant(e) => e,
Entry::Occupied(e) => {
return TableExistsSnafu { table: e.key() }.fail();
}
};
e.insert(table.clone());
Ok(table)
}
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
@@ -459,7 +463,7 @@ mod tests {
assert!(schema.table_exist(table_name).unwrap());
// rename table
let new_table_name = "numbers";
let new_table_name = "numbers_new";
let rename_table_req = RenameTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),


@@ -26,20 +26,21 @@ use common_catalog::consts::{
use common_telemetry::{debug, error, info};
use dashmap::DashMap;
use futures::Stream;
use futures_util::StreamExt;
use futures_util::{StreamExt, TryStreamExt};
use key_lock::KeyLock;
use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
use table::engine::manager::TableEngineManagerRef;
use table::engine::EngineContext;
use table::metadata::TableId;
use table::requests::{CreateTableRequest, OpenTableRequest};
use table::table::numbers::NumbersTable;
use table::TableRef;
use tokio::sync::Mutex;
use crate::error::{
CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, OpenTableSnafu, Result,
SchemaNotFoundSnafu, TableEngineNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, OpenTableSnafu,
ParallelOpenTableSnafu, Result, SchemaNotFoundSnafu, TableEngineNotFoundSnafu,
TableExistsSnafu, UnimplementedSnafu,
};
use crate::helper::{
build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, CatalogValue,
@@ -255,15 +256,31 @@ impl RemoteCatalogManager {
) -> Result<()> {
info!("initializing tables in {}.{}", catalog_name, schema_name);
let mut table_num = 0;
let mut tables = self.iter_remote_tables(catalog_name, schema_name).await;
while let Some(r) = tables.next().await {
let (table_key, table_value) = r?;
let table_ref = self.open_or_create_table(&table_key, &table_value).await?;
schema.register_table(table_key.table_name.to_string(), table_ref)?;
info!("Registered table {}", &table_key.table_name);
max_table_id = max_table_id.max(table_value.table_id());
let tables = self.iter_remote_tables(catalog_name, schema_name).await;
let kvs = tables.try_collect::<Vec<_>>().await?;
let node_id = self.node_id;
let joins = kvs
.into_iter()
.map(|(table_key, table_value)| {
let engine_manager = self.engine_manager.clone();
common_runtime::spawn_bg(async move {
open_or_create_table(node_id, engine_manager, &table_key, &table_value).await
})
})
.collect::<Vec<_>>();
let vec = futures::future::join_all(joins).await;
for res in vec {
let table_ref = res.context(ParallelOpenTableSnafu)??;
let table_info = table_ref.table_info();
let table_name = &table_info.name;
let table_id = table_info.ident.table_id;
schema.register_table(table_name.clone(), table_ref)?;
info!("Registered table {}", table_name);
max_table_id = max_table_id.max(table_id);
table_num += 1;
}
info!(
"initialized tables in {}.{}, total: {}",
catalog_name, schema_name, table_num
@@ -311,87 +328,88 @@ impl RemoteCatalogManager {
info!("Created catalog '{catalog_key}");
Ok(catalog_provider)
}
}
async fn open_or_create_table(
&self,
table_key: &TableGlobalKey,
table_value: &TableGlobalValue,
) -> Result<TableRef> {
let context = EngineContext {};
let TableGlobalKey {
catalog_name,
schema_name,
table_name,
..
} = table_key;
async fn open_or_create_table(
node_id: u64,
engine_manager: TableEngineManagerRef,
table_key: &TableGlobalKey,
table_value: &TableGlobalValue,
) -> Result<TableRef> {
let context = EngineContext {};
let TableGlobalKey {
catalog_name,
schema_name,
table_name,
..
} = table_key;
let table_id = table_value.table_id();
let table_id = table_value.table_id();
let TableGlobalValue {
table_info,
regions_id_map,
..
} = table_value;
let TableGlobalValue {
table_info,
regions_id_map,
..
} = table_value;
// unwrap safety: checked in yielding this table when `iter_remote_tables`
let region_numbers = regions_id_map.get(&self.node_id).unwrap();
// unwrap safety: checked in yielding this table when `iter_remote_tables`
let region_numbers = regions_id_map.get(&node_id).unwrap();
let request = OpenTableRequest {
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
table_id,
};
let engine = self
.engine_manager
let request = OpenTableRequest {
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
table_id,
};
let engine =
engine_manager
.engine(&table_info.meta.engine)
.context(TableEngineNotFoundSnafu {
engine_name: &table_info.meta.engine,
})?;
match engine
.open_table(&context, request)
.await
.with_context(|_| OpenTableSnafu {
table_info: format!("{catalog_name}.{schema_name}.{table_name}, id:{table_id}"),
})? {
Some(table) => {
info!(
"Table opened: {}.{}.{}",
catalog_name, schema_name, table_name
);
Ok(table)
}
None => {
info!(
"Try create table: {}.{}.{}",
catalog_name, schema_name, table_name
);
match engine
.open_table(&context, request)
.await
.with_context(|_| OpenTableSnafu {
table_info: format!("{catalog_name}.{schema_name}.{table_name}, id:{table_id}"),
})? {
Some(table) => {
info!(
"Table opened: {}.{}.{}",
catalog_name, schema_name, table_name
);
Ok(table)
}
None => {
info!(
"Try create table: {}.{}.{}",
catalog_name, schema_name, table_name
);
let meta = &table_info.meta;
let req = CreateTableRequest {
id: table_id,
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
schema: meta.schema.clone(),
region_numbers: region_numbers.clone(),
primary_key_indices: meta.primary_key_indices.clone(),
create_if_not_exists: true,
table_options: meta.options.clone(),
engine: engine.name().to_string(),
};
let meta = &table_info.meta;
let req = CreateTableRequest {
id: table_id,
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
schema: meta.schema.clone(),
region_numbers: region_numbers.clone(),
primary_key_indices: meta.primary_key_indices.clone(),
create_if_not_exists: true,
table_options: meta.options.clone(),
engine: engine.name().to_string(),
};
engine
.create_table(&context, req)
.await
.context(CreateTableSnafu {
table_info: format!(
"{}.{}.{}, id:{}",
&catalog_name, &schema_name, &table_name, table_id
),
})
}
engine
.create_table(&context, req)
.await
.context(CreateTableSnafu {
table_info: format!(
"{}.{}.{}, id:{}",
&catalog_name, &schema_name, &table_name, table_id
),
})
}
}
}
@@ -423,15 +441,6 @@ impl CatalogManager for RemoteCatalogManager {
})?;
handle_system_table_request(self, engine, &mut system_table_requests).await?;
info!("All system table opened");
self.catalog(DEFAULT_CATALOG_NAME)
.unwrap()
.unwrap()
.schema(DEFAULT_SCHEMA_NAME)
.unwrap()
.unwrap()
.register_table("numbers".to_string(), Arc::new(NumbersTable::default()))
.unwrap();
Ok(())
}
@@ -747,8 +756,8 @@ pub struct RemoteSchemaProvider {
schema_name: String,
node_id: u64,
backend: KvBackendRef,
tables: Arc<ArcSwap<HashMap<String, TableRef>>>,
mutex: Arc<Mutex<()>>,
tables: Arc<ArcSwap<DashMap<String, TableRef>>>,
mutex: Arc<KeyLock<String>>,
}
impl RemoteSchemaProvider {
@@ -785,11 +794,16 @@ impl SchemaProvider for RemoteSchemaProvider {
}
fn table_names(&self) -> Result<Vec<String>> {
Ok(self.tables.load().keys().cloned().collect::<Vec<_>>())
Ok(self
.tables
.load()
.iter()
.map(|en| en.key().clone())
.collect::<Vec<_>>())
}
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
Ok(self.tables.load().get(name).cloned())
Ok(self.tables.load().get(name).map(|en| en.value().clone()))
}
fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
@@ -806,7 +820,7 @@ impl SchemaProvider for RemoteSchemaProvider {
let prev = std::thread::spawn(move || {
common_runtime::block_on_read(async move {
let _guard = mutex.lock().await;
let _guard = mutex.lock(table_key.clone()).await;
backend
.set(
table_key.as_bytes(),
@@ -818,11 +832,8 @@ impl SchemaProvider for RemoteSchemaProvider {
table_key, table_value
);
let prev_tables = tables.load();
let mut new_tables = HashMap::with_capacity(prev_tables.len() + 1);
new_tables.clone_from(&prev_tables);
let prev = new_tables.insert(name, table);
tables.store(Arc::new(new_tables));
let tables = tables.load();
let prev = tables.insert(name, table);
Ok(prev)
})
})
@@ -846,18 +857,15 @@ impl SchemaProvider for RemoteSchemaProvider {
let tables = self.tables.clone();
let prev = std::thread::spawn(move || {
common_runtime::block_on_read(async move {
let _guard = mutex.lock().await;
let _guard = mutex.lock(table_key.clone()).await;
backend.delete(table_key.as_bytes()).await?;
debug!(
"Successfully deleted catalog table entry, key: {}",
table_key
);
let prev_tables = tables.load();
let mut new_tables = HashMap::with_capacity(prev_tables.len() + 1);
new_tables.clone_from(&prev_tables);
let prev = new_tables.remove(&table_name);
tables.store(Arc::new(new_tables));
let tables = tables.load();
let prev = tables.remove(&table_name).map(|en| en.1);
Ok(prev)
})
})


@@ -173,7 +173,6 @@ mod tests {
.schema(DEFAULT_SCHEMA_NAME)
.unwrap()
.unwrap();
assert_eq!(vec!["numbers"], default_schema.table_names().unwrap());
// register a new table with an nonexistent catalog
let catalog_name = DEFAULT_CATALOG_NAME.to_string();
@@ -209,14 +208,7 @@ mod tests {
table,
};
assert!(catalog_manager.register_table(reg_req).await.unwrap());
assert_eq!(
HashSet::from([table_name, "numbers".to_string()]),
default_schema
.table_names()
.unwrap()
.into_iter()
.collect::<HashSet<_>>()
);
assert_eq!(vec![table_name], default_schema.table_names().unwrap());
}
#[tokio::test]


@@ -25,7 +25,7 @@ use arrow_flight::{FlightData, Ticket};
use common_error::prelude::*;
use common_grpc::flight::{flight_messages_to_recordbatches, FlightDecoder, FlightMessage};
use common_query::Output;
use common_telemetry::logging;
use common_telemetry::{logging, timer};
use futures_util::{TryFutureExt, TryStreamExt};
use prost::Message;
use snafu::{ensure, ResultExt};
@@ -33,7 +33,7 @@ use snafu::{ensure, ResultExt};
use crate::error::{
ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
};
use crate::{error, Client, Result};
use crate::{error, metrics, Client, Result};
#[derive(Clone, Debug, Default)]
pub struct Database {
@@ -107,6 +107,7 @@ impl Database {
}
pub async fn insert(&self, request: InsertRequest) -> Result<u32> {
let _timer = timer!(metrics::METRIC_GRPC_INSERT);
let mut client = self.client.make_database_client()?.inner;
let request = GreptimeRequest {
header: Some(RequestHeader {
@@ -130,6 +131,7 @@ impl Database {
}
pub async fn sql(&self, sql: &str) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_SQL);
self.do_get(Request::Query(QueryRequest {
query: Some(Query::Sql(sql.to_string())),
}))
@@ -137,6 +139,7 @@ impl Database {
}
pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_LOGICAL_PLAN);
self.do_get(Request::Query(QueryRequest {
query: Some(Query::LogicalPlan(logical_plan)),
}))
@@ -150,6 +153,7 @@ impl Database {
end: &str,
step: &str,
) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_PROMQL_RANGE_QUERY);
self.do_get(Request::Query(QueryRequest {
query: Some(Query::PromRangeQuery(PromRangeQuery {
query: promql.to_string(),
@@ -162,6 +166,7 @@ impl Database {
}
pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_CREATE_TABLE);
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::CreateTable(expr)),
}))
@@ -169,6 +174,7 @@ impl Database {
}
pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_ALTER);
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::Alter(expr)),
}))
@@ -176,6 +182,7 @@ impl Database {
}
pub async fn drop_table(&self, expr: DropTableExpr) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_DROP_TABLE);
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::DropTable(expr)),
}))
@@ -183,6 +190,7 @@ impl Database {
}
pub async fn flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_FLUSH_TABLE);
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::FlushTable(expr)),
}))
@@ -190,6 +198,8 @@ impl Database {
}
async fn do_get(&self, request: Request) -> Result<Output> {
// FIXME(paomian): should add some labels to these metrics
let _timer = timer!(metrics::METRIC_GRPC_DO_GET);
let request = GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),


@@ -16,6 +16,7 @@ mod client;
mod database;
mod error;
pub mod load_balance;
mod metrics;
pub use api;
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};

src/client/src/metrics.rs (new file, 24 lines)

@@ -0,0 +1,24 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! client metrics
pub const METRIC_GRPC_CREATE_TABLE: &str = "grpc.create_table";
pub const METRIC_GRPC_PROMQL_RANGE_QUERY: &str = "grpc.promql.range_query";
pub const METRIC_GRPC_INSERT: &str = "grpc.insert";
pub const METRIC_GRPC_SQL: &str = "grpc.sql";
pub const METRIC_GRPC_LOGICAL_PLAN: &str = "grpc.logical_plan";
pub const METRIC_GRPC_ALTER: &str = "grpc.alter";
pub const METRIC_GRPC_DROP_TABLE: &str = "grpc.drop_table";
pub const METRIC_GRPC_FLUSH_TABLE: &str = "grpc.flush_table";
pub const METRIC_GRPC_DO_GET: &str = "grpc.do_get";
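These constants are consumed through the `timer!` macro added to the `Database` methods earlier in this diff. A minimal, hedged sketch of the pattern (the `timed` helper below is hypothetical and not part of this changeset):

```rust
use common_telemetry::timer;

/// Hypothetical helper: time an arbitrary async operation under one of the
/// client metric names declared above.
async fn timed<F, T>(metric: &'static str, fut: F) -> T
where
    F: std::future::Future<Output = T>,
{
    // The guard records a histogram sample for `metric` when it is dropped.
    let _timer = timer!(metric);
    fut.await
}
```

For example, `timed(METRIC_GRPC_SQL, db.sql("SELECT 1")).await` would record the call's latency under `grpc.sql`, exported with the global `greptime` prefix that the Prometheus recorder gains later in this diff.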


@@ -27,3 +27,4 @@ pub const SYSTEM_CATALOG_TABLE_ID: u32 = 0;
pub const SCRIPTS_TABLE_ID: u32 = 1;
pub const MITO_ENGINE: &str = "mito";
pub const IMMUTABLE_FILE_ENGINE: &str = "file";


@@ -5,9 +5,26 @@ edition.workspace = true
license.workspace = true
[dependencies]
arrow.workspace = true
arrow-schema.workspace = true
async-compression = { version = "0.3", features = [
"bzip2",
"gzip",
"xz",
"zstd",
"futures-io",
"tokio",
] }
async-trait.workspace = true
bytes = "1.1"
common-error = { path = "../error" }
common-runtime = { path = "../runtime" }
datafusion.workspace = true
derive_builder = "0.12"
futures.workspace = true
object-store = { path = "../../object-store" }
regex = "1.7"
snafu.workspace = true
tokio.workspace = true
tokio-util.workspace = true
url = "2.3"


@@ -0,0 +1,109 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Display;
use std::io;
use std::str::FromStr;
use async_compression::tokio::bufread::{BzDecoder, GzipDecoder, XzDecoder, ZstdDecoder};
use bytes::Bytes;
use futures::Stream;
use tokio::io::{AsyncRead, BufReader};
use tokio_util::io::{ReaderStream, StreamReader};
use crate::error::{self, Error, Result};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum CompressionType {
/// Gzip-ed file
GZIP,
/// Bzip2-ed file
BZIP2,
/// Xz-ed file (liblzma)
XZ,
/// Zstd-ed file
ZSTD,
/// Uncompressed file
UNCOMPRESSED,
}
impl FromStr for CompressionType {
type Err = Error;
fn from_str(s: &str) -> Result<Self> {
let s = s.to_uppercase();
match s.as_str() {
"GZIP" | "GZ" => Ok(Self::GZIP),
"BZIP2" | "BZ2" => Ok(Self::BZIP2),
"XZ" => Ok(Self::XZ),
"ZST" | "ZSTD" => Ok(Self::ZSTD),
"" => Ok(Self::UNCOMPRESSED),
_ => error::UnsupportedCompressionTypeSnafu {
compression_type: s,
}
.fail(),
}
}
}
impl Display for CompressionType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(match self {
Self::GZIP => "GZIP",
Self::BZIP2 => "BZIP2",
Self::XZ => "XZ",
Self::ZSTD => "ZSTD",
Self::UNCOMPRESSED => "",
})
}
}
impl CompressionType {
pub const fn is_compressed(&self) -> bool {
!matches!(self, &Self::UNCOMPRESSED)
}
pub fn convert_async_read<T: AsyncRead + Unpin + Send + 'static>(
&self,
s: T,
) -> Box<dyn AsyncRead + Unpin + Send> {
match self {
CompressionType::GZIP => Box::new(GzipDecoder::new(BufReader::new(s))),
CompressionType::BZIP2 => Box::new(BzDecoder::new(BufReader::new(s))),
CompressionType::XZ => Box::new(XzDecoder::new(BufReader::new(s))),
CompressionType::ZSTD => Box::new(ZstdDecoder::new(BufReader::new(s))),
CompressionType::UNCOMPRESSED => Box::new(s),
}
}
pub fn convert_stream<T: Stream<Item = io::Result<Bytes>> + Unpin + Send + 'static>(
&self,
s: T,
) -> Box<dyn Stream<Item = io::Result<Bytes>> + Send + Unpin> {
match self {
CompressionType::GZIP => {
Box::new(ReaderStream::new(GzipDecoder::new(StreamReader::new(s))))
}
CompressionType::BZIP2 => {
Box::new(ReaderStream::new(BzDecoder::new(StreamReader::new(s))))
}
CompressionType::XZ => {
Box::new(ReaderStream::new(XzDecoder::new(StreamReader::new(s))))
}
CompressionType::ZSTD => {
Box::new(ReaderStream::new(ZstdDecoder::new(StreamReader::new(s))))
}
CompressionType::UNCOMPRESSED => Box::new(s),
}
}
}
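As a quick illustration of how the `CompressionType` API above composes, here is a hedged sketch (the `decompress_all` helper is hypothetical and assumes it lives in this crate so that `crate::compression::CompressionType` resolves):

```rust
use std::str::FromStr;

use tokio::io::AsyncReadExt;

use crate::compression::CompressionType;

/// Hypothetical helper: parse a user-supplied compression option and
/// decompress an in-memory buffer through `convert_async_read`.
async fn decompress_all(option: &str, raw: Vec<u8>) -> std::io::Result<Vec<u8>> {
    // "GZ", "gzip", "bz2", "xz", "zstd" and "" (uncompressed) are all accepted spellings.
    let compression = CompressionType::from_str(option).expect("unsupported compression type");
    let mut reader = compression.convert_async_read(std::io::Cursor::new(raw));
    let mut decoded = Vec::new();
    reader.read_to_end(&mut decoded).await?;
    Ok(decoded)
}
```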


@@ -21,6 +21,9 @@ use url::ParseError;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Unsupported compression type: {}", compression_type))]
UnsupportedCompressionType { compression_type: String },
#[snafu(display("Unsupported backend protocol: {}", protocol))]
UnsupportedBackendProtocol { protocol: String },
@@ -33,12 +36,44 @@ pub enum Error {
#[snafu(display("Invalid url: {}, error :{}", url, source))]
InvalidUrl { url: String, source: ParseError },
#[snafu(display("Failed to decompression, source: {}", source))]
Decompression {
source: object_store::Error,
location: Location,
},
#[snafu(display("Failed to build backend, source: {}", source))]
BuildBackend {
source: object_store::Error,
location: Location,
},
#[snafu(display("Failed to read object from path: {}, source: {}", path, source))]
ReadObject {
path: String,
location: Location,
source: object_store::Error,
},
#[snafu(display("Failed to read parquet source: {}", source))]
ReadParquetSnafu {
location: Location,
source: datafusion::parquet::errors::ParquetError,
},
#[snafu(display("Failed to convert parquet to schema: {}", source))]
ParquetToSchema {
location: Location,
source: datafusion::parquet::errors::ParquetError,
},
#[snafu(display("Failed to infer schema from file: {}, source: {}", path, source))]
InferSchema {
path: String,
location: Location,
source: arrow_schema::ArrowError,
},
#[snafu(display("Failed to list object in path: {}, source: {}", path, source))]
ListObjects {
path: String,
@@ -48,6 +83,25 @@ pub enum Error {
#[snafu(display("Invalid connection: {}", msg))]
InvalidConnection { msg: String },
#[snafu(display("Failed to join handle: {}", source))]
JoinHandle {
location: Location,
source: tokio::task::JoinError,
},
#[snafu(display("Failed to parse format {} with value: {}", key, value))]
ParseFormat {
key: &'static str,
value: String,
location: Location,
},
#[snafu(display("Failed to merge schema: {}", source))]
MergeSchema {
source: arrow_schema::ArrowError,
location: Location,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -56,13 +110,23 @@ impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
use Error::*;
match self {
BuildBackend { .. } | ListObjects { .. } => StatusCode::StorageUnavailable,
BuildBackend { .. } | ListObjects { .. } | ReadObject { .. } => {
StatusCode::StorageUnavailable
}
UnsupportedBackendProtocol { .. }
| UnsupportedCompressionType { .. }
| InvalidConnection { .. }
| InvalidUrl { .. }
| EmptyHostPath { .. }
| InvalidPath { .. } => StatusCode::InvalidArguments,
| InvalidPath { .. }
| InferSchema { .. }
| ReadParquetSnafu { .. }
| ParquetToSchema { .. }
| ParseFormat { .. }
| MergeSchema { .. } => StatusCode::InvalidArguments,
Decompression { .. } | JoinHandle { .. } => StatusCode::Unexpected,
}
}
@@ -71,14 +135,25 @@ impl ErrorExt for Error {
}
fn location_opt(&self) -> Option<common_error::snafu::Location> {
use Error::*;
match self {
Error::BuildBackend { location, .. } => Some(*location),
Error::ListObjects { location, .. } => Some(*location),
Error::UnsupportedBackendProtocol { .. }
| Error::EmptyHostPath { .. }
| Error::InvalidPath { .. }
| Error::InvalidUrl { .. }
| Error::InvalidConnection { .. } => None,
BuildBackend { location, .. } => Some(*location),
ReadObject { location, .. } => Some(*location),
ListObjects { location, .. } => Some(*location),
InferSchema { location, .. } => Some(*location),
ReadParquetSnafu { location, .. } => Some(*location),
ParquetToSchema { location, .. } => Some(*location),
Decompression { location, .. } => Some(*location),
JoinHandle { location, .. } => Some(*location),
ParseFormat { location, .. } => Some(*location),
MergeSchema { location, .. } => Some(*location),
UnsupportedBackendProtocol { .. }
| EmptyHostPath { .. }
| InvalidPath { .. }
| InvalidUrl { .. }
| InvalidConnection { .. }
| UnsupportedCompressionType { .. } => None,
}
}
}


@@ -0,0 +1,126 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod csv;
pub mod json;
pub mod parquet;
#[cfg(test)]
pub mod tests;
pub const DEFAULT_SCHEMA_INFER_MAX_RECORD: usize = 1000;
use std::result;
use std::sync::Arc;
use std::task::Poll;
use arrow::record_batch::RecordBatch;
use arrow_schema::{ArrowError, Schema};
use async_trait::async_trait;
use bytes::{Buf, Bytes};
use datafusion::error::{DataFusionError, Result as DataFusionResult};
use datafusion::physical_plan::file_format::FileOpenFuture;
use futures::StreamExt;
use object_store::ObjectStore;
use crate::compression::CompressionType;
use crate::error::Result;
pub const FORMAT_COMPRESSION_TYPE: &str = "COMPRESSION_TYPE";
pub const FORMAT_DELIMTERL: &str = "DELIMTERL";
pub const FORMAT_SCHEMA_INFER_MAX_RECORD: &str = "SCHEMA_INFER_MAX_RECORD";
pub const FORMAT_HAS_HEADER: &str = "FORMAT_HAS_HEADER";
#[async_trait]
pub trait FileFormat: Send + Sync + std::fmt::Debug {
async fn infer_schema(&self, store: &ObjectStore, path: String) -> Result<Schema>;
}
pub trait ArrowDecoder: Send + 'static {
/// Decode records from `buf` returning the number of bytes read.
///
/// This method returns `Ok(0)` once `batch_size` objects have been parsed since the
/// last call to [`Self::flush`], or `buf` is exhausted.
///
/// Any remaining bytes should be included in the next call to [`Self::decode`].
fn decode(&mut self, buf: &[u8]) -> result::Result<usize, ArrowError>;
/// Flushes the currently buffered data to a [`RecordBatch`].
///
/// This should only be called after [`Self::decode`] has returned `Ok(0)`,
/// otherwise it may return an error if called partway through decoding a record.
///
/// Returns `Ok(None)` if no buffered data.
fn flush(&mut self) -> result::Result<Option<RecordBatch>, ArrowError>;
}
impl ArrowDecoder for arrow::csv::reader::Decoder {
fn decode(&mut self, buf: &[u8]) -> result::Result<usize, ArrowError> {
self.decode(buf)
}
fn flush(&mut self) -> result::Result<Option<RecordBatch>, ArrowError> {
self.flush()
}
}
impl ArrowDecoder for arrow::json::RawDecoder {
fn decode(&mut self, buf: &[u8]) -> result::Result<usize, ArrowError> {
self.decode(buf)
}
fn flush(&mut self) -> result::Result<Option<RecordBatch>, ArrowError> {
self.flush()
}
}
pub fn open_with_decoder<T: ArrowDecoder, F: Fn() -> DataFusionResult<T>>(
object_store: Arc<ObjectStore>,
path: String,
compression_type: CompressionType,
decoder_factory: F,
) -> DataFusionResult<FileOpenFuture> {
let mut decoder = decoder_factory()?;
Ok(Box::pin(async move {
let reader = object_store
.reader(&path)
.await
.map_err(|e| DataFusionError::External(Box::new(e)))?;
let mut upstream = compression_type.convert_stream(reader).fuse();
let mut buffered = Bytes::new();
let stream = futures::stream::poll_fn(move |cx| {
loop {
if buffered.is_empty() {
if let Some(result) = futures::ready!(upstream.poll_next_unpin(cx)) {
buffered = result?;
};
}
let decoded = decoder.decode(buffered.as_ref())?;
if decoded == 0 {
break;
} else {
buffered.advance(decoded);
}
}
Poll::Ready(decoder.flush().transpose())
});
Ok(stream.boxed())
}))
}
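The decode/flush contract documented on `ArrowDecoder` above can also be driven synchronously over an in-memory buffer. A minimal sketch, assuming it sits in the same module so `ArrowDecoder`, `RecordBatch`, and `ArrowError` are already in scope (the helper itself is hypothetical and not part of this changeset):

```rust
/// Hypothetical helper: feed `buf` to a decoder and collect every batch it produces.
fn drain_decoder<D: ArrowDecoder>(
    mut decoder: D,
    mut buf: &[u8],
) -> std::result::Result<Vec<RecordBatch>, ArrowError> {
    let mut batches = Vec::new();
    while !buf.is_empty() {
        let consumed = decoder.decode(buf)?;
        if consumed == 0 {
            // `batch_size` rows are buffered; emit them before feeding more bytes.
            if let Some(batch) = decoder.flush()? {
                batches.push(batch);
            }
        } else {
            buf = &buf[consumed..];
        }
    }
    // Emit whatever is still buffered once the input is exhausted.
    if let Some(batch) = decoder.flush()? {
        batches.push(batch);
    }
    Ok(batches)
}
```

This is essentially what `open_with_decoder` does above, except that the real implementation polls an async byte stream instead of a slice.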


@@ -0,0 +1,299 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::str::FromStr;
use std::sync::Arc;
use arrow::csv;
use arrow::csv::reader::infer_reader_schema as infer_csv_schema;
use arrow_schema::{Schema, SchemaRef};
use async_trait::async_trait;
use common_runtime;
use datafusion::error::Result as DataFusionResult;
use datafusion::physical_plan::file_format::{FileMeta, FileOpenFuture, FileOpener};
use derive_builder::Builder;
use object_store::ObjectStore;
use snafu::ResultExt;
use tokio_util::io::SyncIoBridge;
use crate::compression::CompressionType;
use crate::error::{self, Result};
use crate::file_format::{self, open_with_decoder, FileFormat};
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct CsvFormat {
pub has_header: bool,
pub delimiter: u8,
pub schema_infer_max_record: Option<usize>,
pub compression_type: CompressionType,
}
impl TryFrom<&HashMap<String, String>> for CsvFormat {
type Error = error::Error;
fn try_from(value: &HashMap<String, String>) -> Result<Self> {
let mut format = CsvFormat::default();
if let Some(delimiter) = value.get(file_format::FORMAT_DELIMTERL) {
// TODO(weny): consider supporting delimiters given as escape sequences like "\t" (not only the raw byte b'\t')
format.delimiter = u8::from_str(delimiter).map_err(|_| {
error::ParseFormatSnafu {
key: file_format::FORMAT_DELIMTERL,
value: delimiter,
}
.build()
})?;
};
if let Some(compression_type) = value.get(file_format::FORMAT_COMPRESSION_TYPE) {
format.compression_type = CompressionType::from_str(compression_type)?;
};
if let Some(schema_infer_max_record) =
value.get(file_format::FORMAT_SCHEMA_INFER_MAX_RECORD)
{
format.schema_infer_max_record =
Some(schema_infer_max_record.parse::<usize>().map_err(|_| {
error::ParseFormatSnafu {
key: file_format::FORMAT_SCHEMA_INFER_MAX_RECORD,
value: schema_infer_max_record,
}
.build()
})?);
};
if let Some(has_header) = value.get(file_format::FORMAT_HAS_HEADER) {
format.has_header = has_header.parse().map_err(|_| {
error::ParseFormatSnafu {
key: file_format::FORMAT_HAS_HEADER,
value: has_header,
}
.build()
})?;
}
Ok(format)
}
}
impl Default for CsvFormat {
fn default() -> Self {
Self {
has_header: true,
delimiter: b',',
schema_infer_max_record: Some(file_format::DEFAULT_SCHEMA_INFER_MAX_RECORD),
compression_type: CompressionType::UNCOMPRESSED,
}
}
}
#[derive(Debug, Clone, Builder)]
pub struct CsvConfig {
batch_size: usize,
file_schema: SchemaRef,
#[builder(default = "None")]
file_projection: Option<Vec<usize>>,
#[builder(default = "true")]
has_header: bool,
#[builder(default = "b','")]
delimiter: u8,
}
impl CsvConfig {
fn builder(&self) -> csv::ReaderBuilder {
let mut builder = csv::ReaderBuilder::new()
.with_schema(self.file_schema.clone())
.with_delimiter(self.delimiter)
.with_batch_size(self.batch_size)
.has_header(self.has_header);
if let Some(proj) = &self.file_projection {
builder = builder.with_projection(proj.clone());
}
builder
}
}
#[derive(Debug, Clone)]
pub struct CsvOpener {
config: Arc<CsvConfig>,
object_store: Arc<ObjectStore>,
compression_type: CompressionType,
}
impl CsvOpener {
/// Return a new [`CsvOpener`]. The caller must ensure that [`CsvConfig`]'s `file_schema` corresponds to the file being opened.
pub fn new(
config: CsvConfig,
object_store: ObjectStore,
compression_type: CompressionType,
) -> Self {
CsvOpener {
config: Arc::new(config),
object_store: Arc::new(object_store),
compression_type,
}
}
}
impl FileOpener for CsvOpener {
fn open(&self, meta: FileMeta) -> DataFusionResult<FileOpenFuture> {
open_with_decoder(
self.object_store.clone(),
meta.location().to_string(),
self.compression_type,
|| Ok(self.config.builder().build_decoder()),
)
}
}
#[async_trait]
impl FileFormat for CsvFormat {
async fn infer_schema(&self, store: &ObjectStore, path: String) -> Result<Schema> {
let reader = store
.reader(&path)
.await
.context(error::ReadObjectSnafu { path: &path })?;
let decoded = self.compression_type.convert_async_read(reader);
let delimiter = self.delimiter;
let schema_infer_max_record = self.schema_infer_max_record;
let has_header = self.has_header;
common_runtime::spawn_blocking_read(move || {
let reader = SyncIoBridge::new(decoded);
let (schema, _records_read) =
infer_csv_schema(reader, delimiter, schema_infer_max_record, has_header)
.context(error::InferSchemaSnafu { path: &path })?;
Ok(schema)
})
.await
.context(error::JoinHandleSnafu)?
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::file_format::{
FileFormat, FORMAT_COMPRESSION_TYPE, FORMAT_DELIMTERL, FORMAT_HAS_HEADER,
FORMAT_SCHEMA_INFER_MAX_RECORD,
};
use crate::test_util::{self, format_schema, test_store};
fn test_data_root() -> String {
test_util::get_data_dir("tests/csv").display().to_string()
}
#[tokio::test]
async fn infer_schema_basic() {
let csv = CsvFormat::default();
let store = test_store(&test_data_root());
let schema = csv
.infer_schema(&store, "simple.csv".to_string())
.await
.unwrap();
let formatted: Vec<_> = format_schema(schema);
assert_eq!(
vec![
"c1: Utf8: NULL",
"c2: Int64: NULL",
"c3: Int64: NULL",
"c4: Int64: NULL",
"c5: Int64: NULL",
"c6: Int64: NULL",
"c7: Int64: NULL",
"c8: Int64: NULL",
"c9: Int64: NULL",
"c10: Int64: NULL",
"c11: Float64: NULL",
"c12: Float64: NULL",
"c13: Utf8: NULL"
],
formatted,
);
}
#[tokio::test]
async fn infer_schema_with_limit() {
let json = CsvFormat {
schema_infer_max_record: Some(3),
..CsvFormat::default()
};
let store = test_store(&test_data_root());
let schema = json
.infer_schema(&store, "schema_infer_limit.csv".to_string())
.await
.unwrap();
let formatted: Vec<_> = format_schema(schema);
assert_eq!(
vec![
"a: Int64: NULL",
"b: Float64: NULL",
"c: Int64: NULL",
"d: Int64: NULL"
],
formatted
);
let json = CsvFormat::default();
let store = test_store(&test_data_root());
let schema = json
.infer_schema(&store, "schema_infer_limit.csv".to_string())
.await
.unwrap();
let formatted: Vec<_> = format_schema(schema);
assert_eq!(
vec![
"a: Int64: NULL",
"b: Float64: NULL",
"c: Int64: NULL",
"d: Utf8: NULL"
],
formatted
);
}
#[test]
fn test_try_from() {
let mut map = HashMap::new();
let format: CsvFormat = CsvFormat::try_from(&map).unwrap();
assert_eq!(format, CsvFormat::default());
map.insert(
FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
"2000".to_string(),
);
map.insert(FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string());
map.insert(FORMAT_DELIMTERL.to_string(), b'\t'.to_string());
map.insert(FORMAT_HAS_HEADER.to_string(), "false".to_string());
let format = CsvFormat::try_from(&map).unwrap();
assert_eq!(
format,
CsvFormat {
compression_type: CompressionType::ZSTD,
schema_infer_max_record: Some(2000),
delimiter: b'\t',
has_header: false,
}
);
}
}


@@ -0,0 +1,217 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::io::BufReader;
use std::str::FromStr;
use std::sync::Arc;
use arrow::datatypes::SchemaRef;
use arrow::json::reader::{infer_json_schema_from_iterator, ValueIter};
use arrow::json::RawReaderBuilder;
use arrow_schema::Schema;
use async_trait::async_trait;
use common_runtime;
use datafusion::error::{DataFusionError, Result as DataFusionResult};
use datafusion::physical_plan::file_format::{FileMeta, FileOpenFuture, FileOpener};
use object_store::ObjectStore;
use snafu::ResultExt;
use tokio_util::io::SyncIoBridge;
use crate::compression::CompressionType;
use crate::error::{self, Result};
use crate::file_format::{self, open_with_decoder, FileFormat};
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct JsonFormat {
pub schema_infer_max_record: Option<usize>,
pub compression_type: CompressionType,
}
impl TryFrom<&HashMap<String, String>> for JsonFormat {
type Error = error::Error;
fn try_from(value: &HashMap<String, String>) -> Result<Self> {
let mut format = JsonFormat::default();
if let Some(compression_type) = value.get(file_format::FORMAT_COMPRESSION_TYPE) {
format.compression_type = CompressionType::from_str(compression_type)?
};
if let Some(schema_infer_max_record) =
value.get(file_format::FORMAT_SCHEMA_INFER_MAX_RECORD)
{
format.schema_infer_max_record =
Some(schema_infer_max_record.parse::<usize>().map_err(|_| {
error::ParseFormatSnafu {
key: file_format::FORMAT_SCHEMA_INFER_MAX_RECORD,
value: schema_infer_max_record,
}
.build()
})?);
};
Ok(format)
}
}
impl Default for JsonFormat {
fn default() -> Self {
Self {
schema_infer_max_record: Some(file_format::DEFAULT_SCHEMA_INFER_MAX_RECORD),
compression_type: CompressionType::UNCOMPRESSED,
}
}
}
#[async_trait]
impl FileFormat for JsonFormat {
async fn infer_schema(&self, store: &ObjectStore, path: String) -> Result<Schema> {
let reader = store
.reader(&path)
.await
.context(error::ReadObjectSnafu { path: &path })?;
let decoded = self.compression_type.convert_async_read(reader);
let schema_infer_max_record = self.schema_infer_max_record;
common_runtime::spawn_blocking_read(move || {
let mut reader = BufReader::new(SyncIoBridge::new(decoded));
let iter = ValueIter::new(&mut reader, schema_infer_max_record);
let schema = infer_json_schema_from_iterator(iter)
.context(error::InferSchemaSnafu { path: &path })?;
Ok(schema)
})
.await
.context(error::JoinHandleSnafu)?
}
}
#[derive(Debug, Clone)]
pub struct JsonOpener {
batch_size: usize,
projected_schema: SchemaRef,
object_store: Arc<ObjectStore>,
compression_type: CompressionType,
}
impl JsonOpener {
/// Return a new [`JsonOpener`]. Any fields not present in `projected_schema` will be ignored.
pub fn new(
batch_size: usize,
projected_schema: SchemaRef,
object_store: ObjectStore,
compression_type: CompressionType,
) -> Self {
Self {
batch_size,
projected_schema,
object_store: Arc::new(object_store),
compression_type,
}
}
}
impl FileOpener for JsonOpener {
fn open(&self, meta: FileMeta) -> DataFusionResult<FileOpenFuture> {
open_with_decoder(
self.object_store.clone(),
meta.location().to_string(),
self.compression_type,
|| {
RawReaderBuilder::new(self.projected_schema.clone())
.with_batch_size(self.batch_size)
.build_decoder()
.map_err(DataFusionError::from)
},
)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::file_format::{FileFormat, FORMAT_COMPRESSION_TYPE, FORMAT_SCHEMA_INFER_MAX_RECORD};
use crate::test_util::{self, format_schema, test_store};
fn test_data_root() -> String {
test_util::get_data_dir("tests/json").display().to_string()
}
#[tokio::test]
async fn infer_schema_basic() {
let json = JsonFormat::default();
let store = test_store(&test_data_root());
let schema = json
.infer_schema(&store, "simple.json".to_string())
.await
.unwrap();
let formatted: Vec<_> = format_schema(schema);
assert_eq!(
vec![
"a: Int64: NULL",
"b: Float64: NULL",
"c: Boolean: NULL",
"d: Utf8: NULL",
],
formatted
);
}
#[tokio::test]
async fn infer_schema_with_limit() {
let json = JsonFormat {
schema_infer_max_record: Some(3),
..JsonFormat::default()
};
let store = test_store(&test_data_root());
let schema = json
.infer_schema(&store, "schema_infer_limit.json".to_string())
.await
.unwrap();
let formatted: Vec<_> = format_schema(schema);
assert_eq!(
vec!["a: Int64: NULL", "b: Float64: NULL", "c: Boolean: NULL"],
formatted
);
}
#[test]
fn test_try_from() {
let mut map = HashMap::new();
let format = JsonFormat::try_from(&map).unwrap();
assert_eq!(format, JsonFormat::default());
map.insert(
FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
"2000".to_string(),
);
map.insert(FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string());
let format = JsonFormat::try_from(&map).unwrap();
assert_eq!(
format,
JsonFormat {
compression_type: CompressionType::ZSTD,
schema_infer_max_record: Some(2000),
}
);
}
}


@@ -0,0 +1,76 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use arrow_schema::Schema;
use async_trait::async_trait;
use datafusion::parquet::arrow::async_reader::AsyncFileReader;
use datafusion::parquet::arrow::parquet_to_arrow_schema;
use object_store::ObjectStore;
use snafu::ResultExt;
use crate::error::{self, Result};
use crate::file_format::FileFormat;
#[derive(Debug, Default)]
pub struct ParquetFormat {}
#[async_trait]
impl FileFormat for ParquetFormat {
async fn infer_schema(&self, store: &ObjectStore, path: String) -> Result<Schema> {
let mut reader = store
.reader(&path)
.await
.context(error::ReadObjectSnafu { path: &path })?;
let metadata = reader
.get_metadata()
.await
.context(error::ReadParquetSnafuSnafu)?;
let file_metadata = metadata.file_metadata();
let schema = parquet_to_arrow_schema(
file_metadata.schema_descr(),
file_metadata.key_value_metadata(),
)
.context(error::ParquetToSchemaSnafu)?;
Ok(schema)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::file_format::FileFormat;
use crate::test_util::{self, format_schema, test_store};
fn test_data_root() -> String {
test_util::get_data_dir("tests/parquet")
.display()
.to_string()
}
#[tokio::test]
async fn infer_schema_basic() {
let json = ParquetFormat::default();
let store = test_store(&test_data_root());
let schema = json
.infer_schema(&store, "basic.parquet".to_string())
.await
.unwrap();
let formatted: Vec<_> = format_schema(schema);
assert_eq!(vec!["num: Int64: NULL", "str: Utf8: NULL"], formatted);
}
}


@@ -0,0 +1,161 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::vec;
use arrow_schema::SchemaRef;
use datafusion::assert_batches_eq;
use datafusion::datasource::listing::PartitionedFile;
use datafusion::datasource::object_store::ObjectStoreUrl;
use datafusion::physical_plan::file_format::{FileOpener, FileScanConfig, FileStream};
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use futures::StreamExt;
use crate::compression::CompressionType;
use crate::file_format::csv::{CsvConfigBuilder, CsvOpener};
use crate::file_format::json::JsonOpener;
use crate::test_util::{self, test_basic_schema, test_store};
fn scan_config(file_schema: SchemaRef, limit: Option<usize>, filename: &str) -> FileScanConfig {
FileScanConfig {
object_store_url: ObjectStoreUrl::parse("empty://").unwrap(), // won't be used
file_schema,
file_groups: vec![vec![PartitionedFile::new(filename.to_string(), 10)]],
statistics: Default::default(),
projection: None,
limit,
table_partition_cols: vec![],
output_ordering: None,
infinite_source: false,
}
}
struct Test<'a, T: FileOpener> {
config: FileScanConfig,
opener: T,
expected: Vec<&'a str>,
}
impl<'a, T: FileOpener> Test<'a, T> {
pub async fn run(self) {
let result = FileStream::new(
&self.config,
0,
self.opener,
&ExecutionPlanMetricsSet::new(),
)
.unwrap()
.map(|b| b.unwrap())
.collect::<Vec<_>>()
.await;
assert_batches_eq!(self.expected, &result);
}
}
#[tokio::test]
async fn test_json_opener() {
let store = test_store("/");
let schema = test_basic_schema();
let json_opener = JsonOpener::new(
100,
schema.clone(),
store.clone(),
CompressionType::UNCOMPRESSED,
);
let path = &test_util::get_data_dir("tests/json/basic.json")
.display()
.to_string();
let tests = [
Test {
config: scan_config(schema.clone(), None, path),
opener: json_opener.clone(),
expected: vec![
"+-----+-------+",
"| num | str |",
"+-----+-------+",
"| 5 | test |",
"| 2 | hello |",
"| 4 | foo |",
"+-----+-------+",
],
},
Test {
config: scan_config(schema.clone(), Some(1), path),
opener: json_opener.clone(),
expected: vec![
"+-----+------+",
"| num | str |",
"+-----+------+",
"| 5 | test |",
"+-----+------+",
],
},
];
for test in tests {
test.run().await;
}
}
#[tokio::test]
async fn test_csv_opener() {
let store = test_store("/");
let schema = test_basic_schema();
let path = &test_util::get_data_dir("tests/csv/basic.csv")
.display()
.to_string();
let csv_conf = CsvConfigBuilder::default()
.batch_size(test_util::TEST_BATCH_SIZE)
.file_schema(schema.clone())
.build()
.unwrap();
let csv_opener = CsvOpener::new(csv_conf, store, CompressionType::UNCOMPRESSED);
let tests = [
Test {
config: scan_config(schema.clone(), None, path),
opener: csv_opener.clone(),
expected: vec![
"+-----+-------+",
"| num | str |",
"+-----+-------+",
"| 5 | test |",
"| 2 | hello |",
"| 4 | foo |",
"+-----+-------+",
],
},
Test {
config: scan_config(schema.clone(), Some(1), path),
opener: csv_opener.clone(),
expected: vec![
"+-----+------+",
"| num | str |",
"+-----+------+",
"| 5 | test |",
"+-----+------+",
],
},
];
for test in tests {
test.run().await;
}
}


@@ -12,7 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod compression;
pub mod error;
pub mod file_format;
pub mod lister;
pub mod object_store;
#[cfg(test)]
pub mod test_util;
pub mod util;


@@ -27,7 +27,7 @@ use crate::error::{self, Result};
pub const FS_SCHEMA: &str = "FS";
pub const S3_SCHEMA: &str = "S3";
/// parse url returns (schema,Option<host>,path)
/// Returns (schema, Option<host>, path)
pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
let parsed_url = Url::parse(url);
match parsed_url {
@@ -43,7 +43,7 @@ pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
}
}
pub fn build_backend(url: &str, connection: HashMap<String, String>) -> Result<ObjectStore> {
pub fn build_backend(url: &str, connection: &HashMap<String, String>) -> Result<ObjectStore> {
let (schema, host, _path) = parse_url(url)?;
match schema.to_uppercase().as_str() {


@@ -30,7 +30,7 @@ const ENABLE_VIRTUAL_HOST_STYLE: &str = "ENABLE_VIRTUAL_HOST_STYLE";
pub fn build_s3_backend(
host: &str,
path: &str,
connection: HashMap<String, String>,
connection: &HashMap<String, String>,
) -> Result<ObjectStore> {
let mut builder = S3::default();


@@ -0,0 +1,59 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::PathBuf;
use std::sync::Arc;
use arrow_schema::{DataType, Field, Schema, SchemaRef};
use object_store::services::Fs;
use object_store::ObjectStore;
pub const TEST_BATCH_SIZE: usize = 100;
pub fn get_data_dir(path: &str) -> PathBuf {
// https://doc.rust-lang.org/cargo/reference/environment-variables.html
let dir = env!("CARGO_MANIFEST_DIR");
PathBuf::from(dir).join(path)
}
pub fn format_schema(schema: Schema) -> Vec<String> {
schema
.fields()
.iter()
.map(|f| {
format!(
"{}: {:?}: {}",
f.name(),
f.data_type(),
if f.is_nullable() { "NULL" } else { "NOT NULL" }
)
})
.collect()
}
pub fn test_store(root: &str) -> ObjectStore {
let mut builder = Fs::default();
builder.root(root);
ObjectStore::new(builder).unwrap().finish()
}
pub fn test_basic_schema() -> SchemaRef {
let schema = Schema::new(vec![
Field::new("num", DataType::Int64, false),
Field::new("str", DataType::Utf8, false),
]);
Arc::new(schema)
}


@@ -0,0 +1,24 @@
### Parquet
The `parquet/basic.parquet` file was converted from `csv/basic.csv` via [bdt](https://github.com/andygrove/bdt).
Contents of `parquet/basic.parquet`:
Data:
```
+-----+-------+
| num | str |
+-----+-------+
| 5 | test |
| 2 | hello |
| 4 | foo |
+-----+-------+
```
Schema:
```
+-------------+-----------+-------------+
| column_name | data_type | is_nullable |
+-------------+-----------+-------------+
| num | Int64 | YES |
| str | Utf8 | YES |
+-------------+-----------+-------------+
```
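For reference, an equivalent conversion can be sketched in Rust with DataFusion instead of bdt (hedged: this is not how the fixture in this changeset was produced, and the exact `write_parquet` signature varies between DataFusion versions):

```rust
use datafusion::error::Result;
use datafusion::prelude::{CsvReadOptions, SessionContext};

#[tokio::main]
async fn main() -> Result<()> {
    let ctx = SessionContext::new();
    // Read the CSV test fixture and materialize it as a Parquet file.
    let df = ctx
        .read_csv("tests/csv/basic.csv", CsvReadOptions::new())
        .await?;
    df.write_parquet("tests/parquet/basic.parquet", None).await?;
    Ok(())
}
```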


@@ -0,0 +1,4 @@
num,str
5,test
2,hello
4,foo


@@ -0,0 +1,5 @@
a,b,c,d
1,2,3,4
1,2,3,4
1,2.0,3,4
1,2,4,test


@@ -0,0 +1,11 @@
c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13
c,2,1,18109,2033001162,-6513304855495910254,25,43062,1491205016,5863949479783605708,0.110830784,0.9294097332465232,6WfVFBVGJSQb7FhA7E0lBwdvjfZnSW
d,5,-40,22614,706441268,-7542719935673075327,155,14337,3373581039,11720144131976083864,0.69632107,0.3114712539863804,C2GT5KVyOPZpgKVl110TyZO0NcJ434
b,1,29,-18218,994303988,5983957848665088916,204,9489,3275293996,14857091259186476033,0.53840446,0.17909035118828576,AyYVExXK6AR2qUTxNZ7qRHQOVGMLcz
a,1,-85,-15154,1171968280,1919439543497968449,77,52286,774637006,12101411955859039553,0.12285209,0.6864391962767343,0keZ5G8BffGwgF2RwQD59TFzMStxCB
b,5,-82,22080,1824882165,7373730676428214987,208,34331,3342719438,3330177516592499461,0.82634634,0.40975383525297016,Ig1QcuKsjHXkproePdERo2w0mYzIqd
b,4,-111,-1967,-4229382,1892872227362838079,67,9832,1243785310,8382489916947120498,0.06563997,0.152498292971736,Sfx0vxv1skzZWT1PqVdoRDdO6Sb6xH
e,3,104,-25136,1738331255,300633854973581194,139,20807,3577318119,13079037564113702254,0.40154034,0.7764360990307122,DuJNG8tufSqW0ZstHqWj3aGvFLMg4A
a,3,13,12613,1299719633,2020498574254265315,191,17835,3998790955,14881411008939145569,0.041445434,0.8813167497816289,Amn2K87Db5Es3dFQO9cw9cvpAM6h35
d,1,38,18384,-335410409,-1632237090406591229,26,57510,2712615025,1842662804748246269,0.6064476,0.6404495093354053,4HX6feIvmNXBN7XGqgO4YVBkhu8GDI
a,4,-38,20744,762932956,308913475857409919,7,45465,1787652631,878137512938218976,0.7459874,0.02182578039211991,ydkwycaISlYSlEq3TlkS2m15I2pcp8


@@ -0,0 +1,3 @@
{"num":5,"str":"test"}
{"num":2,"str":"hello"}
{"num":4,"str":"foo"}


@@ -0,0 +1,4 @@
{"a":1}
{"a":-10, "b":-3.5}
{"a":2, "b":0.6, "c":false}
{"a":1, "b":2.0, "c":false, "d":"4"}


@@ -0,0 +1,12 @@
{"a":1, "b":2.0, "c":false, "d":"4"}
{"a":-10, "b":-3.5, "c":true, "d":"4"}
{"a":2, "b":0.6, "c":false, "d":"text"}
{"a":1, "b":2.0, "c":false, "d":"4"}
{"a":7, "b":-3.5, "c":true, "d":"4"}
{"a":1, "b":0.6, "c":false, "d":"text"}
{"a":1, "b":2.0, "c":false, "d":"4"}
{"a":5, "b":-3.5, "c":true, "d":"4"}
{"a":1, "b":0.6, "c":false, "d":"text"}
{"a":1, "b":2.0, "c":false, "d":"4"}
{"a":1, "b":-3.5, "c":true, "d":"4"}
{"a":100000000000000, "b":0.6, "c":false, "d":"text"}

Binary file not shown.


@@ -212,7 +212,7 @@ fn build_calc_fn(
fn calc(input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> {
assert_eq!(input.len(), #num_params);
#( let #range_array_names = RangeArray::try_new(extract_array(&input[#param_numbers])?.data().clone().into())?; )*
#( let #range_array_names = RangeArray::try_new(extract_array(&input[#param_numbers])?.to_data().into())?; )*
// TODO(ruihang): add ensure!()


@@ -8,7 +8,7 @@ license.workspace = true
async-trait.workspace = true
common-error = { path = "../error" }
common-telemetry = { path = "../telemetry" }
metrics = "0.20"
metrics.workspace = true
once_cell = "1.12"
paste.workspace = true
snafu.workspace = true


@@ -14,16 +14,18 @@ common-error = { path = "../error" }
common-telemetry = { path = "../telemetry" }
datafusion.workspace = true
datafusion-expr.workspace = true
datafusion-substrait.workspace = true
datatypes = { path = "../../datatypes" }
futures = "0.3"
prost.workspace = true
session = { path = "../../session" }
snafu.workspace = true
table = { path = "../../table" }
query = { path = "../../query" }
[dependencies.substrait_proto]
package = "substrait"
version = "0.4"
version = "0.7"
[dev-dependencies]
datatypes = { path = "../../datatypes" }


@@ -752,6 +752,14 @@ mod utils {
BuiltinScalarFunction::Uuid => "uuid",
BuiltinScalarFunction::Struct => "struct",
BuiltinScalarFunction::ArrowTypeof => "arrow_type_of",
BuiltinScalarFunction::Acosh => "acosh",
BuiltinScalarFunction::Asinh => "asinh",
BuiltinScalarFunction::Atanh => "atanh",
BuiltinScalarFunction::Cbrt => "cbrt",
BuiltinScalarFunction::Cosh => "cosh",
BuiltinScalarFunction::Pi => "pi",
BuiltinScalarFunction::Sinh => "sinh",
BuiltinScalarFunction::Tanh => "tanh",
}
}
}


@@ -49,10 +49,10 @@ use crate::error::{
use crate::schema::{from_schema, to_schema};
use crate::SubstraitPlan;
pub struct DFLogicalSubstraitConvertor;
pub struct DFLogicalSubstraitConvertorDeprecated;
#[async_trait]
impl SubstraitPlan for DFLogicalSubstraitConvertor {
impl SubstraitPlan for DFLogicalSubstraitConvertorDeprecated {
type Error = Error;
type Plan = LogicalPlan;
@@ -76,7 +76,7 @@ impl SubstraitPlan for DFLogicalSubstraitConvertor {
}
}
impl DFLogicalSubstraitConvertor {
impl DFLogicalSubstraitConvertorDeprecated {
async fn convert_plan(
&self,
mut plan: Plan,
@@ -197,6 +197,14 @@ impl DFLogicalSubstraitConvertor {
name: "Cross Relation",
}
.fail()?,
RelType::HashJoin(_) => UnsupportedPlanSnafu {
name: "Hash Join Relation",
}
.fail()?,
RelType::MergeJoin(_) => UnsupportedPlanSnafu {
name: "Merge Join Relation",
}
.fail()?,
};
Ok(logical_plan)
@@ -311,7 +319,7 @@ impl DFLogicalSubstraitConvertor {
}
}
impl DFLogicalSubstraitConvertor {
impl DFLogicalSubstraitConvertorDeprecated {
fn logical_plan_to_rel(
&self,
ctx: &mut ConvertorContext,
@@ -585,7 +593,7 @@ mod test {
}
async fn logical_plan_round_trip(plan: LogicalPlan, catalog: CatalogManagerRef) {
let convertor = DFLogicalSubstraitConvertor;
let convertor = DFLogicalSubstraitConvertorDeprecated;
let proto = convertor.encode(plan.clone()).unwrap();
let tripped_plan = convertor.decode(proto, catalog).await.unwrap();


@@ -0,0 +1,72 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use async_trait::async_trait;
use bytes::{Buf, Bytes, BytesMut};
use catalog::datafusion::catalog_adapter::DfCatalogProviderAdapter;
use catalog::CatalogManagerRef;
use common_catalog::consts::DEFAULT_CATALOG_NAME;
use datafusion::prelude::SessionContext;
use datafusion_expr::LogicalPlan;
use datafusion_substrait::logical_plan::consumer::from_substrait_plan;
use datafusion_substrait::logical_plan::producer::to_substrait_plan;
use prost::Message;
use snafu::ResultExt;
use substrait_proto::proto::Plan;
use crate::error::{DecodeDfPlanSnafu, DecodeRelSnafu, EncodeDfPlanSnafu, EncodeRelSnafu, Error};
use crate::SubstraitPlan;
pub struct DFLogicalSubstraitConvertor;
#[async_trait]
impl SubstraitPlan for DFLogicalSubstraitConvertor {
type Error = Error;
type Plan = LogicalPlan;
async fn decode<B: Buf + Send>(
&self,
message: B,
catalog_manager: CatalogManagerRef,
) -> Result<Self::Plan, Self::Error> {
let mut context = SessionContext::new();
context.register_catalog(
DEFAULT_CATALOG_NAME,
Arc::new(DfCatalogProviderAdapter::new(
catalog_manager
.catalog(DEFAULT_CATALOG_NAME)
.unwrap()
.unwrap(),
)),
);
let plan = Plan::decode(message).context(DecodeRelSnafu)?;
let df_plan = from_substrait_plan(&mut context, &plan)
.await
.context(DecodeDfPlanSnafu)?;
Ok(df_plan)
}
fn encode(&self, plan: Self::Plan) -> Result<Bytes, Self::Error> {
let mut buf = BytesMut::new();
let substrait_plan = to_substrait_plan(&plan).context(EncodeDfPlanSnafu)?;
substrait_plan.encode(&mut buf).context(EncodeRelSnafu)?;
Ok(buf.freeze())
}
}


@@ -109,6 +109,18 @@ pub enum Error {
#[snafu(backtrace)]
source: catalog::error::Error,
},
#[snafu(display("Failed to encode DataFusion plan, source: {}", source))]
EncodeDfPlan {
source: datafusion::error::DataFusionError,
location: Location,
},
#[snafu(display("Failed to decode DataFusion plan, source: {}", source))]
DecodeDfPlan {
source: datafusion::error::DataFusionError,
location: Location,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -129,7 +141,10 @@ impl ErrorExt for Error {
| Error::InvalidParameters { .. }
| Error::TableNotFound { .. }
| Error::SchemaNotMatch { .. } => StatusCode::InvalidArguments,
Error::DFInternal { .. } | Error::Internal { .. } => StatusCode::Internal,
Error::DFInternal { .. }
| Error::Internal { .. }
| Error::EncodeDfPlan { .. }
| Error::DecodeDfPlan { .. } => StatusCode::Internal,
Error::ConvertDfSchema { source } => source.status_code(),
Error::ResolveTable { source, .. } => source.status_code(),
}


@@ -18,6 +18,7 @@
mod context;
mod df_expr;
mod df_logical;
mod df_substrait;
pub mod error;
mod schema;
mod types;
@@ -26,7 +27,7 @@ use async_trait::async_trait;
use bytes::{Buf, Bytes};
use catalog::CatalogManagerRef;
pub use crate::df_logical::DFLogicalSubstraitConvertor;
pub use crate::df_substrait::DFLogicalSubstraitConvertor;
#[async_trait]
pub trait SubstraitPlan {


@@ -12,8 +12,8 @@ deadlock_detection = ["parking_lot"]
backtrace = "0.3"
common-error = { path = "../error" }
console-subscriber = { version = "0.1", optional = true }
metrics = "0.20.1"
metrics-exporter-prometheus = { version = "0.11", default-features = false }
metrics-exporter-prometheus = { git = "https://github.com/GreptimeTeam/metrics.git", rev = "174de287e9f7f9f57c0272be56c95df156489476", default-features = false }
metrics.workspace = true
once_cell = "1.10"
opentelemetry = { version = "0.17", default-features = false, features = [
"trace",


@@ -32,7 +32,9 @@ pub fn init_default_metrics_recorder() {
/// Init prometheus recorder.
fn init_prometheus_recorder() {
let recorder = PrometheusBuilder::new().build_recorder();
let recorder = PrometheusBuilder::new()
.add_global_prefix("greptime".to_string())
.build_recorder();
let mut h = PROMETHEUS_HANDLE.as_ref().write().unwrap();
*h = Some(recorder.handle());
// TODO(LFC): separate metrics for testing and metrics for production
@@ -60,6 +62,7 @@ pub fn try_handle() -> Option<PrometheusHandle> {
pub struct Timer {
start: Instant,
name: &'static str,
labels: Vec<(String, String)>,
}
impl Timer {
@@ -67,9 +70,25 @@ impl Timer {
Self {
start: Instant::now(),
name,
labels: Vec::new(),
}
}
pub fn new_with_labels<S: Into<String> + Clone>(name: &'static str, labels: &[(S, S)]) -> Self {
Self {
start: Instant::now(),
name,
labels: labels
.iter()
.map(|(k, v)| (k.clone().into(), v.clone().into()))
.collect::<Vec<_>>(),
}
}
pub fn labels_mut(&mut self) -> &mut Vec<(String, String)> {
self.labels.as_mut()
}
pub fn elapsed(&self) -> Duration {
self.start.elapsed()
}
@@ -77,7 +96,11 @@ impl Timer {
impl Drop for Timer {
fn drop(&mut self) {
histogram!(self.name, self.start.elapsed());
if !self.labels.is_empty() {
histogram!(self.name, self.start.elapsed(), &self.labels);
} else {
histogram!(self.name, self.start.elapsed());
}
}
}
@@ -86,6 +109,9 @@ macro_rules! timer {
($name: expr) => {
$crate::metric::Timer::new($name)
};
($name:expr,$labels:expr) => {
$crate::metric::Timer::new_with_labels($name, $labels)
};
}
#[cfg(test)]
@@ -109,4 +135,48 @@ mod tests {
assert!(text.contains("test_elapsed_timer_a"));
assert!(text.contains("test_elapsed_timer_b"));
}
#[test]
fn test_elapsed_timer_with_label() {
init_default_metrics_recorder();
{
let t = Timer::new("test_elapsed_timer_a");
drop(t);
}
let handle = try_handle().unwrap();
let text = handle.render();
assert!(text.contains("test_elapsed_timer_a"));
assert!(!text.contains("test_elapsed_timer_b"));
let label_a = "label_a";
let label_b = "label_b";
let label_c = "label_c";
let label_d = "label_d";
let label_e = "label_e";
assert!(!text.contains(label_a));
assert!(!text.contains(label_b));
{
let mut timer_b = timer!("test_elapsed_timer_b", &[(label_a, "a"), (label_b, "b")]);
let labels = timer_b.labels_mut();
labels.push((label_c.to_owned(), "d".to_owned()));
}
let text = handle.render();
assert!(text.contains("test_elapsed_timer_a"));
assert!(text.contains("test_elapsed_timer_b"));
assert!(text.contains(label_a));
assert!(text.contains(label_b));
assert!(text.contains(label_c));
{
let mut timer_c = timer!("test_elapsed_timer_c");
let labels = timer_c.labels_mut();
labels.push((label_d.to_owned(), "d".to_owned()));
labels.push((label_e.to_owned(), "e".to_owned()));
}
let text = handle.render();
assert!(text.contains("test_elapsed_timer_c"));
assert!(text.contains(label_d));
assert!(text.contains(label_e));
}
}


@@ -15,13 +15,13 @@
use std::fmt::{Display, Formatter};
use std::str::FromStr;
use chrono::NaiveDateTime;
use chrono::{Local, LocalResult, NaiveDateTime, TimeZone};
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use crate::error::{Error, ParseDateStrSnafu, Result};
use crate::error::{Error, InvalidDateStrSnafu, Result};
const DATETIME_FORMAT: &str = "%F %T";
const DATETIME_FORMAT_WITH_TZ: &str = "%F %T%z";
/// [DateTime] represents the **seconds elapsed since "1970-01-01 00:00:00 UTC" (UNIX Epoch)**.
#[derive(
@@ -32,7 +32,13 @@ pub struct DateTime(i64);
impl Display for DateTime {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
if let Some(abs_time) = NaiveDateTime::from_timestamp_opt(self.0, 0) {
write!(f, "{}", abs_time.format(DATETIME_FORMAT))
write!(
f,
"{}",
Local {}
.from_utc_datetime(&abs_time)
.format(DATETIME_FORMAT_WITH_TZ)
)
} else {
write!(f, "DateTime({})", self.0)
}
@@ -49,9 +55,21 @@ impl FromStr for DateTime {
type Err = Error;
fn from_str(s: &str) -> Result<Self> {
let datetime = NaiveDateTime::parse_from_str(s, DATETIME_FORMAT)
.context(ParseDateStrSnafu { raw: s })?;
Ok(Self(datetime.timestamp()))
let local = Local {};
let timestamp = if let Ok(d) = NaiveDateTime::parse_from_str(s, DATETIME_FORMAT) {
match local.from_local_datetime(&d) {
LocalResult::None => {
return InvalidDateStrSnafu { raw: s }.fail();
}
LocalResult::Single(d) | LocalResult::Ambiguous(d, _) => d.naive_utc().timestamp(),
}
} else if let Ok(v) = chrono::DateTime::parse_from_str(s, DATETIME_FORMAT_WITH_TZ) {
v.timestamp()
} else {
return InvalidDateStrSnafu { raw: s }.fail();
};
Ok(Self(timestamp))
}
}
@@ -81,14 +99,16 @@ mod tests {
#[test]
pub fn test_new_date_time() {
assert_eq!("1970-01-01 00:00:00", DateTime::new(0).to_string());
assert_eq!("1970-01-01 00:00:01", DateTime::new(1).to_string());
assert_eq!("1969-12-31 23:59:59", DateTime::new(-1).to_string());
std::env::set_var("TZ", "Asia/Shanghai");
assert_eq!("1970-01-01 08:00:00+0800", DateTime::new(0).to_string());
assert_eq!("1970-01-01 08:00:01+0800", DateTime::new(1).to_string());
assert_eq!("1970-01-01 07:59:59+0800", DateTime::new(-1).to_string());
}
#[test]
pub fn test_parse_from_string() {
let time = "1970-01-01 00:00:00";
std::env::set_var("TZ", "Asia/Shanghai");
let time = "1970-01-01 00:00:00+0800";
let dt = DateTime::from_str(time).unwrap();
assert_eq!(time, &dt.to_string());
}
@@ -98,4 +118,22 @@ mod tests {
let d: DateTime = 42.into();
assert_eq!(42, d.val());
}
#[test]
fn test_parse_local_date_time() {
std::env::set_var("TZ", "Asia/Shanghai");
assert_eq!(
-28800,
DateTime::from_str("1970-01-01 00:00:00").unwrap().val()
);
assert_eq!(0, DateTime::from_str("1970-01-01 08:00:00").unwrap().val());
}
#[test]
fn test_parse_local_date_time_with_tz() {
let ts = DateTime::from_str("1970-01-01 08:00:00+0000")
.unwrap()
.val();
assert_eq!(28800, ts);
}
}
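The DateTime changes above render values in the local time zone with an explicit offset (%F %T%z) and accept both offset-suffixed and plain local strings when parsing. A small standalone sketch of the same chrono 0.4 calls, assuming the local zone is whatever TZ resolves to at runtime:

use chrono::{Local, NaiveDateTime, TimeZone};

fn main() {
    // Render UNIX seconds in the local zone with an offset suffix.
    if let Some(naive_utc) = NaiveDateTime::from_timestamp_opt(0, 0) {
        let local = Local.from_utc_datetime(&naive_utc);
        // Prints e.g. "1970-01-01 08:00:00+0800" when the local zone is UTC+8.
        println!("{}", local.format("%F %T%z"));
    }

    // Parse an offset-suffixed string straight back to UNIX seconds.
    let ts = chrono::DateTime::parse_from_str("1970-01-01 08:00:00+0800", "%F %T%z")
        .map(|dt| dt.timestamp());
    assert_eq!(ts.ok(), Some(0));
}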


@@ -26,6 +26,9 @@ pub enum Error {
#[snafu(display("Failed to parse string to date, raw: {}, source: {}", raw, source))]
ParseDateStr { raw: String, source: ParseError },
#[snafu(display("Invalid date string, raw: {}", raw))]
InvalidDateStr { raw: String, location: Location },
#[snafu(display("Failed to parse a string into Timestamp, raw string: {}", raw))]
ParseTimestamp { raw: String, location: Location },
@@ -46,7 +49,9 @@ impl ErrorExt for Error {
StatusCode::InvalidArguments
}
Error::TimestampOverflow { .. } => StatusCode::Internal,
Error::ArithmeticOverflow { .. } => StatusCode::InvalidArguments,
Error::InvalidDateStr { .. } | Error::ArithmeticOverflow { .. } => {
StatusCode::InvalidArguments
}
}
}
@@ -60,6 +65,7 @@ impl ErrorExt for Error {
| Error::TimestampOverflow { location, .. }
| Error::ArithmeticOverflow { location, .. } => Some(*location),
Error::ParseDateStr { .. } => None,
Error::InvalidDateStr { location, .. } => Some(*location),
}
}
}


@@ -164,16 +164,20 @@ impl Timestamp {
/// Format timestamp to ISO8601 string. If the timestamp exceeds what chrono timestamp can
/// represent, this function simply prints the timestamp unit and value as a plain string.
pub fn to_iso8601_string(&self) -> String {
if let LocalResult::Single(datetime) = self.to_chrono_datetime() {
format!("{}", datetime.format("%Y-%m-%d %H:%M:%S%.f%z"))
if let Some(v) = self.to_chrono_datetime() {
let local = Local {};
format!(
"{}",
local.from_utc_datetime(&v).format("%Y-%m-%d %H:%M:%S%.f%z")
)
} else {
format!("[Timestamp{}: {}]", self.unit, self.value)
}
}
pub fn to_chrono_datetime(&self) -> LocalResult<DateTime<Utc>> {
pub fn to_chrono_datetime(&self) -> Option<NaiveDateTime> {
let (sec, nsec) = self.split();
Utc.timestamp_opt(sec, nsec)
NaiveDateTime::from_timestamp_opt(sec, nsec)
}
}
@@ -636,31 +640,33 @@ mod tests {
#[test]
fn test_to_iso8601_string() {
std::env::set_var("TZ", "Asia/Shanghai");
let datetime_str = "2020-09-08 13:42:29.042+0000";
let ts = Timestamp::from_str(datetime_str).unwrap();
assert_eq!(datetime_str, ts.to_iso8601_string());
assert_eq!("2020-09-08 21:42:29.042+0800", ts.to_iso8601_string());
let ts_millis = 1668070237000;
let ts = Timestamp::new_millisecond(ts_millis);
assert_eq!("2022-11-10 08:50:37+0000", ts.to_iso8601_string());
assert_eq!("2022-11-10 16:50:37+0800", ts.to_iso8601_string());
let ts_millis = -1000;
let ts = Timestamp::new_millisecond(ts_millis);
assert_eq!("1969-12-31 23:59:59+0000", ts.to_iso8601_string());
assert_eq!("1970-01-01 07:59:59+0800", ts.to_iso8601_string());
let ts_millis = -1;
let ts = Timestamp::new_millisecond(ts_millis);
assert_eq!("1969-12-31 23:59:59.999+0000", ts.to_iso8601_string());
assert_eq!("1970-01-01 07:59:59.999+0800", ts.to_iso8601_string());
let ts_millis = -1001;
let ts = Timestamp::new_millisecond(ts_millis);
assert_eq!("1969-12-31 23:59:58.999+0000", ts.to_iso8601_string());
assert_eq!("1970-01-01 07:59:58.999+0800", ts.to_iso8601_string());
}
#[test]
fn test_serialize_to_json_value() {
std::env::set_var("TZ", "Asia/Shanghai");
assert_eq!(
"1970-01-01 00:00:01+0000",
"1970-01-01 08:00:01+0800",
match serde_json::Value::from(Timestamp::new(1, TimeUnit::Second)) {
Value::String(s) => s,
_ => unreachable!(),
@@ -668,7 +674,7 @@ mod tests {
);
assert_eq!(
"1970-01-01 00:00:00.001+0000",
"1970-01-01 08:00:00.001+0800",
match serde_json::Value::from(Timestamp::new(1, TimeUnit::Millisecond)) {
Value::String(s) => s,
_ => unreachable!(),
@@ -676,7 +682,7 @@ mod tests {
);
assert_eq!(
"1970-01-01 00:00:00.000001+0000",
"1970-01-01 08:00:00.000001+0800",
match serde_json::Value::from(Timestamp::new(1, TimeUnit::Microsecond)) {
Value::String(s) => s,
_ => unreachable!(),
@@ -684,7 +690,7 @@ mod tests {
);
assert_eq!(
"1970-01-01 00:00:00.000000001+0000",
"1970-01-01 08:00:00.000000001+0800",
match serde_json::Value::from(Timestamp::new(1, TimeUnit::Nanosecond)) {
Value::String(s) => s,
_ => unreachable!(),
@@ -869,4 +875,18 @@ mod tests {
assert_eq!(1, res.value);
assert_eq!(TimeUnit::Second, res.unit);
}
#[test]
fn test_parse_in_time_zone() {
std::env::set_var("TZ", "Asia/Shanghai");
assert_eq!(
Timestamp::new(0, TimeUnit::Nanosecond),
Timestamp::from_str("1970-01-01 08:00:00.000").unwrap()
);
assert_eq!(
Timestamp::new(0, TimeUnit::Second),
Timestamp::from_str("1970-01-01 08:00:00").unwrap()
);
}
}
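to_iso8601_string above now converts the naive UTC value into the local zone before formatting with %Y-%m-%d %H:%M:%S%.f%z. A hedged sketch of the millisecond case; iso8601_local is a hypothetical helper that mirrors the split-then-format flow:

use chrono::{Local, NaiveDateTime, TimeZone};

fn iso8601_local(millis: i64) -> Option<String> {
    // Split milliseconds into whole seconds and nanoseconds, like Timestamp::split().
    let sec = millis.div_euclid(1000);
    let nsec = (millis.rem_euclid(1000) * 1_000_000) as u32;
    let naive_utc = NaiveDateTime::from_timestamp_opt(sec, nsec)?;
    Some(
        Local
            .from_utc_datetime(&naive_utc)
            .format("%Y-%m-%d %H:%M:%S%.f%z")
            .to_string(),
    )
}

fn main() {
    // Should print "1970-01-01 07:59:58.999+0800" when the local zone is Asia/Shanghai,
    // matching the -1001 ms case in the test above.
    println!("{}", iso8601_local(-1001).unwrap());
}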


@@ -38,7 +38,7 @@ log = "0.4"
log-store = { path = "../log-store" }
meta-client = { path = "../meta-client" }
meta-srv = { path = "../meta-srv", features = ["mock"] }
metrics = "0.20"
metrics.workspace = true
mito = { path = "../mito", features = ["test"] }
object-store = { path = "../object-store" }
pin-project = "1.0"


@@ -14,14 +14,11 @@
use std::any::Any;
use common_datasource::error::Error as DataSourceError;
use common_error::prelude::*;
use common_procedure::ProcedureId;
use datafusion::parquet;
use snafu::Location;
use storage::error::Error as StorageError;
use table::error::Error as TableError;
use url::ParseError;
use crate::datanode::ObjectStoreConfig;
@@ -208,18 +205,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to build backend, source: {}", source))]
BuildBackend {
#[snafu(backtrace)]
source: DataSourceError,
},
#[snafu(display("Failed to parse url, source: {}", source))]
ParseUrl {
source: DataSourceError,
location: Location,
},
#[snafu(display("Runtime resource error, source: {}", source))]
RuntimeResource {
#[snafu(backtrace)]
@@ -229,36 +214,6 @@ pub enum Error {
#[snafu(display("Invalid SQL, error: {}", msg))]
InvalidSql { msg: String },
#[snafu(display("Invalid url: {}, error :{}", url, source))]
InvalidUrl { url: String, source: ParseError },
#[snafu(display("Invalid filepath: {}", path))]
InvalidPath { path: String },
#[snafu(display("Invalid connection: {}", msg))]
InvalidConnection { msg: String },
#[snafu(display("Unsupported backend protocol: {}", protocol))]
UnsupportedBackendProtocol { protocol: String },
#[snafu(display("Failed to regex, source: {}", source))]
BuildRegex {
location: Location,
source: regex::Error,
},
#[snafu(display("Failed to list objects, source: {}", source))]
ListObjects {
#[snafu(backtrace)]
source: DataSourceError,
},
#[snafu(display("Failed to parse the data, source: {}", source))]
ParseDataTypes {
#[snafu(backtrace)]
source: common_recordbatch::error::Error,
},
#[snafu(display("Not support SQL, error: {}", msg))]
NotSupportSql { msg: String },
@@ -409,61 +364,6 @@ pub enum Error {
source: query::error::Error,
},
#[snafu(display("Failed to copy data from table: {}, source: {}", table_name, source))]
CopyTable {
table_name: String,
#[snafu(backtrace)]
source: TableError,
},
#[snafu(display("Failed to execute table scan, source: {}", source))]
TableScanExec {
#[snafu(backtrace)]
source: common_query::error::Error,
},
#[snafu(display(
"File Schema mismatch, expected table schema: {} but found :{}",
table_schema,
file_schema
))]
InvalidSchema {
table_schema: String,
file_schema: String,
},
#[snafu(display("Failed to read parquet file, source: {}", source))]
ReadParquet {
source: parquet::errors::ParquetError,
location: Location,
},
#[snafu(display("Failed to poll stream, source: {}", source))]
PollStream {
source: datafusion_common::DataFusionError,
location: Location,
},
#[snafu(display("Failed to build parquet record batch stream, source: {}", source))]
BuildParquetRecordBatchStream {
location: Location,
source: parquet::errors::ParquetError,
},
#[snafu(display("Failed to read object in path: {}, source: {}", path, source))]
ReadObject {
path: String,
location: Location,
source: object_store::Error,
},
#[snafu(display("Failed to write object into path: {}, source: {}", path, source))]
WriteObject {
path: String,
location: Location,
source: object_store::Error,
},
#[snafu(display("Unrecognized table option: {}", source))]
UnrecognizedTableOption {
#[snafu(backtrace)]
@@ -507,12 +407,6 @@ pub enum Error {
#[snafu(backtrace)]
source: BoxedError,
},
#[snafu(display("Failed to copy table to parquet file, source: {}", source))]
WriteParquet {
#[snafu(backtrace)]
source: storage::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -552,11 +446,6 @@ impl ErrorExt for Error {
ColumnValuesNumberMismatch { .. }
| InvalidSql { .. }
| InvalidUrl { .. }
| InvalidPath { .. }
| InvalidConnection { .. }
| UnsupportedBackendProtocol { .. }
| BuildRegex { .. }
| NotSupportSql { .. }
| KeyColumnNotFound { .. }
| IllegalPrimaryKeysDef { .. }
@@ -570,8 +459,7 @@ impl ErrorExt for Error {
| DatabaseNotFound { .. }
| MissingNodeId { .. }
| MissingMetasrvOpts { .. }
| ColumnNoneDefaultValue { .. }
| ParseUrl { .. } => StatusCode::InvalidArguments,
| ColumnNoneDefaultValue { .. } => StatusCode::InvalidArguments,
// TODO(yingwen): Further categorize http error.
StartServer { .. }
@@ -584,22 +472,13 @@ impl ErrorExt for Error {
| RenameTable { .. }
| Catalog { .. }
| MissingRequiredField { .. }
| BuildParquetRecordBatchStream { .. }
| InvalidSchema { .. }
| ParseDataTypes { .. }
| IncorrectInternalState { .. }
| ShutdownServer { .. }
| ShutdownInstance { .. }
| CloseTableEngine { .. } => StatusCode::Internal,
BuildBackend { .. }
| InitBackend { .. }
| ReadParquet { .. }
| WriteParquet { .. }
| PollStream { .. }
| ReadObject { .. }
| WriteObject { .. }
| ListObjects { .. } => StatusCode::StorageUnavailable,
InitBackend { .. } => StatusCode::StorageUnavailable,
OpenLogStore { source } => source.status_code(),
OpenStorageEngine { source } => source.status_code(),
RuntimeResource { .. } => StatusCode::RuntimeResourcesExhausted,
@@ -607,8 +486,6 @@ impl ErrorExt for Error {
TableIdProviderNotFound { .. } => StatusCode::Unsupported,
BumpTableId { source, .. } => source.status_code(),
ColumnDefaultValue { source, .. } => source.status_code(),
CopyTable { source, .. } => source.status_code(),
TableScanExec { source, .. } => source.status_code(),
UnrecognizedTableOption { .. } => StatusCode::InvalidArguments,
RecoverProcedure { source, .. } | SubmitProcedure { source, .. } => {
source.status_code()


@@ -18,7 +18,7 @@ use std::time::Duration;
use api::v1::meta::{HeartbeatRequest, HeartbeatResponse, NodeStat, Peer};
use catalog::{datanode_stat, CatalogManagerRef};
use common_telemetry::{error, info, warn};
use common_telemetry::{error, info, trace, warn};
use meta_client::client::{HeartbeatSender, MetaClient};
use snafu::ResultExt;
@@ -84,7 +84,7 @@ impl HeartbeatTask {
}
async fn handle_response(resp: HeartbeatResponse) {
info!("heartbeat response: {:?}", resp);
trace!("heartbeat response: {:?}", resp);
}
/// Start heartbeat task, spawn background task.


@@ -20,6 +20,7 @@ use async_trait::async_trait;
use common_query::Output;
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
use query::plan::LogicalPlan;
use query::query_engine::SqlStatementExecutor;
use servers::query_handler::grpc::GrpcQueryHandler;
use session::context::{QueryContext, QueryContextRef};
use snafu::prelude::*;
@@ -65,7 +66,7 @@ impl Instance {
match stmt {
// TODO(LFC): Remove SQL execution branch here.
// Keep this because substrait can't handle much of SQLs now.
QueryStatement::Sql(Statement::Query(_)) => {
QueryStatement::Sql(Statement::Query(_)) | QueryStatement::Promql(_) => {
let plan = self
.query_engine
.planner()
@@ -77,7 +78,9 @@ impl Instance {
.await
.context(ExecuteLogicalPlanSnafu)
}
_ => self.execute_stmt(stmt, ctx).await,
QueryStatement::Sql(stmt) => {
self.execute_sql(stmt, ctx).await.context(ExecuteSqlSnafu)
}
}
}
Query::LogicalPlan(plan) => self.execute_logical(plan).await,
@@ -242,12 +245,11 @@ mod test {
let output = instance.do_query(query, QueryContext::arc()).await.unwrap();
assert!(matches!(output, Output::AffectedRows(0)));
let stmt = QueryLanguageParser::parse_sql(
let Ok(QueryStatement::Sql(stmt)) = QueryLanguageParser::parse_sql(
"INSERT INTO my_database.my_table (a, b, ts) VALUES ('s', 1, 1672384140000)",
)
.unwrap();
) else { unreachable!() };
let output = instance
.execute_stmt(stmt, QueryContext::arc())
.execute_sql(stmt, QueryContext::arc())
.await
.unwrap();
assert!(matches!(output, Output::AffectedRows(1)));
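The test updates above lean on Rust's let-else to destructure the expected QueryStatement::Sql variant and bail out otherwise. A tiny self-contained sketch of that shape; the enum and parse function here are simplified stand-ins, not the real query parser:

#[allow(dead_code)]
enum QueryStatement {
    Sql(String),
    Promql(String),
}

fn parse_sql(input: &str) -> Result<QueryStatement, String> {
    Ok(QueryStatement::Sql(input.to_string()))
}

fn main() {
    // Bind the inner statement directly; any other shape diverges via the else block.
    let Ok(QueryStatement::Sql(stmt)) = parse_sql("INSERT INTO t VALUES (1)") else {
        unreachable!()
    };
    println!("{stmt}");
}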


@@ -21,37 +21,32 @@ use common_telemetry::logging::info;
use common_telemetry::timer;
use query::error::QueryExecutionSnafu;
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
use query::query_engine::StatementHandler;
use query::query_engine::SqlStatementExecutor;
use session::context::QueryContextRef;
use snafu::prelude::*;
use sql::ast::ObjectName;
use sql::statements::copy::{CopyTable, CopyTableArgument};
use sql::statements::statement::Statement;
use table::engine::TableReference;
use table::requests::{CopyDirection, CopyTableRequest, CreateDatabaseRequest, DropTableRequest};
use table::requests::{CreateDatabaseRequest, DropTableRequest};
use crate::error::{
self, BumpTableIdSnafu, ExecuteSqlSnafu, ExecuteStatementSnafu, PlanStatementSnafu, Result,
TableIdProviderNotFoundSnafu,
self, BumpTableIdSnafu, ExecuteSqlSnafu, ExecuteStatementSnafu, NotSupportSqlSnafu,
PlanStatementSnafu, Result, TableIdProviderNotFoundSnafu,
};
use crate::instance::Instance;
use crate::metrics;
use crate::sql::{SqlHandler, SqlRequest};
impl Instance {
pub async fn execute_stmt(
&self,
stmt: QueryStatement,
query_ctx: QueryContextRef,
) -> Result<Output> {
async fn do_execute_sql(&self, stmt: Statement, query_ctx: QueryContextRef) -> Result<Output> {
match stmt {
QueryStatement::Sql(Statement::Insert(insert)) => {
Statement::Insert(insert) => {
let request =
SqlHandler::insert_to_request(self.catalog_manager.clone(), *insert, query_ctx)
.await?;
self.sql_handler.insert(request).await
}
QueryStatement::Sql(Statement::CreateDatabase(create_database)) => {
Statement::CreateDatabase(create_database) => {
let request = CreateDatabaseRequest {
db_name: create_database.name.to_string(),
create_if_not_exists: create_database.if_not_exists,
@@ -64,7 +59,7 @@ impl Instance {
.await
}
QueryStatement::Sql(Statement::CreateTable(create_table)) => {
Statement::CreateTable(create_table) => {
let table_id = self
.table_id_provider
.as_ref()
@@ -88,10 +83,7 @@ impl Instance {
.execute(SqlRequest::CreateTable(request), query_ctx)
.await
}
QueryStatement::Sql(Statement::CreateExternalTable(_create_external_table)) => {
unimplemented!()
}
QueryStatement::Sql(Statement::Alter(alter_table)) => {
Statement::Alter(alter_table) => {
let name = alter_table.table_name().clone();
let (catalog, schema, table) = table_idents_to_full_name(&name, query_ctx.clone())?;
let table_ref = TableReference::full(&catalog, &schema, &table);
@@ -100,7 +92,7 @@ impl Instance {
.execute(SqlRequest::Alter(req), query_ctx)
.await
}
QueryStatement::Sql(Statement::DropTable(drop_table)) => {
Statement::DropTable(drop_table) => {
let (catalog_name, schema_name, table_name) =
table_idents_to_full_name(drop_table.table_name(), query_ctx.clone())?;
let req = DropTableRequest {
@@ -112,74 +104,10 @@ impl Instance {
.execute(SqlRequest::DropTable(req), query_ctx)
.await
}
QueryStatement::Sql(Statement::ShowDatabases(show_databases)) => {
self.sql_handler
.execute(SqlRequest::ShowDatabases(show_databases), query_ctx)
.await
_ => NotSupportSqlSnafu {
msg: format!("not supported to execute {stmt:?}"),
}
QueryStatement::Sql(Statement::ShowTables(show_tables)) => {
self.sql_handler
.execute(SqlRequest::ShowTables(show_tables), query_ctx)
.await
}
QueryStatement::Sql(Statement::ShowCreateTable(_show_create_table)) => {
unimplemented!("SHOW CREATE TABLE is unimplemented yet");
}
QueryStatement::Sql(Statement::Copy(copy_table)) => {
let req = match copy_table {
CopyTable::To(copy_table) => {
let CopyTableArgument {
location,
connection,
pattern,
table_name,
..
} = copy_table;
let (catalog_name, schema_name, table_name) =
table_idents_to_full_name(&table_name, query_ctx.clone())?;
CopyTableRequest {
catalog_name,
schema_name,
table_name,
location,
connection,
pattern,
direction: CopyDirection::Export,
}
}
CopyTable::From(copy_table) => {
let CopyTableArgument {
location,
connection,
pattern,
table_name,
..
} = copy_table;
let (catalog_name, schema_name, table_name) =
table_idents_to_full_name(&table_name, query_ctx.clone())?;
CopyTableRequest {
catalog_name,
schema_name,
table_name,
location,
connection,
pattern,
direction: CopyDirection::Import,
}
}
};
self.sql_handler
.execute(SqlRequest::CopyTable(req), query_ctx)
.await
}
QueryStatement::Sql(Statement::Query(_))
| QueryStatement::Sql(Statement::Explain(_))
| QueryStatement::Sql(Statement::Use(_))
| QueryStatement::Sql(Statement::Tql(_))
| QueryStatement::Sql(Statement::Delete(_))
| QueryStatement::Sql(Statement::DescribeTable(_))
| QueryStatement::Promql(_) => unreachable!(),
.fail(),
}
}
@@ -188,7 +116,7 @@ impl Instance {
promql: &PromQuery,
query_ctx: QueryContextRef,
) -> Result<Output> {
let _timer = timer!(metrics::METRIC_HANDLE_PROMQL_ELAPSED);
let _timer = timer!(metrics::HANDLE_PROMQL_ELAPSED);
let stmt = QueryLanguageParser::parse_promql(promql).context(ExecuteSqlSnafu)?;
@@ -276,13 +204,13 @@ pub fn table_idents_to_full_name(
}
#[async_trait]
impl StatementHandler for Instance {
async fn handle_statement(
impl SqlStatementExecutor for Instance {
async fn execute_sql(
&self,
stmt: QueryStatement,
stmt: Statement,
query_ctx: QueryContextRef,
) -> query::error::Result<Output> {
self.execute_stmt(stmt, query_ctx)
self.do_execute_sql(stmt, query_ctx)
.await
.map_err(BoxedError::new)
.context(QueryExecutionSnafu)
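A notable shape change in the refactor above: statements the datanode no longer executes directly fall into a catch-all arm that returns a typed NotSupportSql error instead of hitting unreachable!(). A plain-Rust sketch of that pattern, with simplified stand-in enums and no snafu:

#[derive(Debug)]
enum Statement {
    Insert,
    DropTable,
    ShowCreateTable,
}

#[derive(Debug)]
#[allow(dead_code)]
enum Error {
    NotSupportSql { msg: String },
}

fn do_execute_sql(stmt: Statement) -> Result<&'static str, Error> {
    match stmt {
        Statement::Insert => Ok("inserted"),
        Statement::DropTable => Ok("dropped"),
        // Everything else surfaces an InvalidArguments-style error to the caller.
        other => Err(Error::NotSupportSql {
            msg: format!("not supported to execute {other:?}"),
        }),
    }
}

fn main() {
    assert!(do_execute_sql(Statement::ShowCreateTable).is_err());
    assert!(do_execute_sql(Statement::Insert).is_ok());
}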


@@ -14,5 +14,5 @@
//! datanode metrics
pub const METRIC_HANDLE_SQL_ELAPSED: &str = "datanode.handle_sql_elapsed";
pub const METRIC_HANDLE_PROMQL_ELAPSED: &str = "datanode.handle_promql_elapsed";
pub const HANDLE_SQL_ELAPSED: &str = "datanode.handle_sql_elapsed";
pub const HANDLE_PROMQL_ELAPSED: &str = "datanode.handle_promql_elapsed";


@@ -19,24 +19,19 @@ use common_error::prelude::BoxedError;
use common_procedure::ProcedureManagerRef;
use common_query::Output;
use common_telemetry::error;
use query::sql::{show_databases, show_tables};
use session::context::QueryContextRef;
use snafu::{OptionExt, ResultExt};
use sql::statements::show::{ShowDatabases, ShowTables};
use table::engine::manager::TableEngineManagerRef;
use table::engine::{TableEngineProcedureRef, TableEngineRef, TableReference};
use table::requests::*;
use table::{Table, TableRef};
use crate::error::{
self, CloseTableEngineSnafu, ExecuteSqlSnafu, Result, TableEngineNotFoundSnafu,
TableNotFoundSnafu,
self, CloseTableEngineSnafu, Result, TableEngineNotFoundSnafu, TableNotFoundSnafu,
};
use crate::instance::sql::table_idents_to_full_name;
mod alter;
mod copy_table_from;
mod copy_table_to;
mod create;
mod drop_table;
mod flush_table;
@@ -49,9 +44,6 @@ pub enum SqlRequest {
Alter(AlterTableRequest),
DropTable(DropTableRequest),
FlushTable(FlushTableRequest),
ShowDatabases(ShowDatabases),
ShowTables(ShowTables),
CopyTable(CopyTableRequest),
}
// Handler to execute SQL statements except queries
@@ -59,6 +51,8 @@ pub enum SqlRequest {
pub struct SqlHandler {
table_engine_manager: TableEngineManagerRef,
catalog_manager: CatalogManagerRef,
// TODO(yingwen): Support multiple table engines. We need to add a method
// to TableEngineManagerRef to return engine procedure by engine name.
engine_procedure: TableEngineProcedureRef,
procedure_manager: Option<ProcedureManagerRef>,
}
@@ -88,17 +82,6 @@ impl SqlHandler {
SqlRequest::CreateDatabase(req) => self.create_database(req, query_ctx.clone()).await,
SqlRequest::Alter(req) => self.alter(req).await,
SqlRequest::DropTable(req) => self.drop_table(req).await,
SqlRequest::CopyTable(req) => match req.direction {
CopyDirection::Export => self.copy_table_to(req).await,
CopyDirection::Import => self.copy_table_from(req).await,
},
SqlRequest::ShowDatabases(req) => {
show_databases(req, self.catalog_manager.clone()).context(ExecuteSqlSnafu)
}
SqlRequest::ShowTables(req) => {
show_tables(req, self.catalog_manager.clone(), query_ctx.clone())
.context(ExecuteSqlSnafu)
}
SqlRequest::FlushTable(req) => self.flush_table(req).await,
};
if let Err(e) = &result {


@@ -13,18 +13,25 @@
// limitations under the License.
use catalog::RenameTableRequest;
use common_procedure::{watcher, ProcedureManagerRef, ProcedureWithId};
use common_query::Output;
use common_telemetry::logging::info;
use snafu::prelude::*;
use sql::statements::alter::{AlterTable, AlterTableOperation};
use sql::statements::column_def_to_schema;
use table::engine::{EngineContext, TableReference};
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest};
use table_procedure::AlterTableProcedure;
use crate::error::{self, Result};
use crate::sql::SqlHandler;
impl SqlHandler {
pub(crate) async fn alter(&self, req: AlterTableRequest) -> Result<Output> {
if let Some(procedure_manager) = &self.procedure_manager {
return self.alter_table_by_procedure(procedure_manager, req).await;
}
let ctx = EngineContext {};
let table_name = req.table_name.clone();
let table_ref = TableReference {
@@ -71,6 +78,33 @@ impl SqlHandler {
Ok(Output::AffectedRows(0))
}
pub(crate) async fn alter_table_by_procedure(
&self,
procedure_manager: &ProcedureManagerRef,
req: AlterTableRequest,
) -> Result<Output> {
let table_name = req.table_name.clone();
let procedure = AlterTableProcedure::new(
req,
self.catalog_manager.clone(),
self.engine_procedure.clone(),
);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
let procedure_id = procedure_with_id.id;
info!("Alter table {} by procedure {}", table_name, procedure_id);
let mut watcher = procedure_manager
.submit(procedure_with_id)
.await
.context(error::SubmitProcedureSnafu { procedure_id })?;
watcher::wait(&mut watcher)
.await
.context(error::WaitProcedureSnafu { procedure_id })?;
Ok(Output::AffectedRows(0))
}
pub(crate) fn alter_to_request(
&self,
alter_table: AlterTable,
@@ -112,12 +146,15 @@ mod tests {
use std::assert_matches::assert_matches;
use datatypes::prelude::ConcreteDataType;
use query::parser::{QueryLanguageParser, QueryStatement};
use query::query_engine::SqlStatementExecutor;
use session::context::QueryContext;
use sql::dialect::GenericDialect;
use sql::parser::ParserContext;
use sql::statements::statement::Statement;
use super::*;
use crate::tests::test_util::create_mock_sql_handler;
use crate::tests::test_util::{create_mock_sql_handler, MockInstance};
fn parse_sql(sql: &str) -> AlterTable {
let mut stmt = ParserContext::create_with_dialect(sql, &GenericDialect {}).unwrap();
@@ -182,4 +219,41 @@ mod tests {
_ => unreachable!(),
}
}
#[tokio::test(flavor = "multi_thread")]
async fn alter_table_by_procedure() {
let instance = MockInstance::with_procedure_enabled("alter_table_by_procedure").await;
// Create table first.
let sql = r#"create table test_alter(
host string,
ts timestamp,
cpu double default 0,
TIME INDEX (ts),
PRIMARY KEY(host)
) engine=mito with(regions=1);"#;
let stmt = match QueryLanguageParser::parse_sql(sql).unwrap() {
QueryStatement::Sql(sql) => sql,
_ => unreachable!(),
};
let output = instance
.inner()
.execute_sql(stmt, QueryContext::arc())
.await
.unwrap();
assert!(matches!(output, Output::AffectedRows(0)));
// Alter table.
let sql = r#"alter table test_alter add column memory double"#;
let stmt = match QueryLanguageParser::parse_sql(sql).unwrap() {
QueryStatement::Sql(sql) => sql,
_ => unreachable!(),
};
let output = instance
.inner()
.execute_sql(stmt, QueryContext::arc())
.await
.unwrap();
assert!(matches!(output, Output::AffectedRows(0)));
}
}
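alter_table_by_procedure above follows a submit-then-wait shape: build the procedure, submit it under a generated id, then block on a watcher until it completes. Below is a condensed tokio sketch of that flow (assuming tokio with the rt and macros features); submit_procedure and the oneshot watcher are hypothetical stand-ins for the procedure manager API:

use tokio::sync::oneshot;

async fn submit_procedure() -> (u64, oneshot::Receiver<Result<(), String>>) {
    let (tx, rx) = oneshot::channel();
    let procedure_id = 42; // stand-in for a randomly generated procedure id
    tokio::spawn(async move {
        // ... the actual alter work would run here ...
        let _ = tx.send(Ok(()));
    });
    (procedure_id, rx)
}

#[tokio::main]
async fn main() {
    let (procedure_id, watcher) = submit_procedure().await;
    println!("Alter table by procedure {procedure_id}");
    // Wait for the procedure to report completion before returning to the caller.
    watcher.await.unwrap().unwrap();
}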


@@ -1,145 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use async_compat::CompatExt;
use common_datasource::lister::{Lister, Source};
use common_datasource::object_store::{build_backend, parse_url};
use common_datasource::util::find_dir_and_filename;
use common_query::Output;
use common_recordbatch::error::DataTypesSnafu;
use datafusion::parquet::arrow::ParquetRecordBatchStreamBuilder;
use datatypes::arrow::record_batch::RecordBatch;
use datatypes::vectors::{Helper, VectorRef};
use futures_util::TryStreamExt;
use regex::Regex;
use snafu::{ensure, ResultExt};
use table::engine::TableReference;
use table::requests::{CopyTableRequest, InsertRequest};
use tokio::io::BufReader;
use crate::error::{self, Result};
use crate::sql::SqlHandler;
impl SqlHandler {
pub(crate) async fn copy_table_from(&self, req: CopyTableRequest) -> Result<Output> {
let table_ref = TableReference {
catalog: &req.catalog_name,
schema: &req.schema_name,
table: &req.table_name,
};
let table = self.get_table(&table_ref).await?;
let (_schema, _host, path) = parse_url(&req.location).context(error::ParseUrlSnafu)?;
let object_store =
build_backend(&req.location, req.connection).context(error::BuildBackendSnafu)?;
let (dir, filename) = find_dir_and_filename(&path);
let regex = req
.pattern
.as_ref()
.map(|x| Regex::new(x))
.transpose()
.context(error::BuildRegexSnafu)?;
let source = if let Some(filename) = filename {
Source::Filename(filename)
} else {
Source::Dir
};
let lister = Lister::new(object_store.clone(), source, dir, regex);
let entries = lister.list().await.context(error::ListObjectsSnafu)?;
let mut buf: Vec<RecordBatch> = Vec::new();
for entry in entries.iter() {
let path = entry.path();
let reader = object_store
.reader(path)
.await
.context(error::ReadObjectSnafu { path })?;
let buf_reader = BufReader::new(reader.compat());
let builder = ParquetRecordBatchStreamBuilder::new(buf_reader)
.await
.context(error::ReadParquetSnafu)?;
ensure!(
builder.schema() == table.schema().arrow_schema(),
error::InvalidSchemaSnafu {
table_schema: table.schema().arrow_schema().to_string(),
file_schema: (*(builder.schema())).to_string()
}
);
let stream = builder
.build()
.context(error::BuildParquetRecordBatchStreamSnafu)?;
let chunk = stream
.try_collect::<Vec<_>>()
.await
.context(error::ReadParquetSnafu)?;
buf.extend(chunk.into_iter());
}
let fields = table
.schema()
.arrow_schema()
.fields()
.iter()
.map(|f| f.name().to_string())
.collect::<Vec<_>>();
// Vec<Columns>
let column_chunks = buf
.into_iter()
.map(|c| Helper::try_into_vectors(c.columns()).context(DataTypesSnafu))
.collect::<Vec<_>>();
let mut futs = Vec::with_capacity(column_chunks.len());
for column_chunk in column_chunks.into_iter() {
let column_chunk = column_chunk.context(error::ParseDataTypesSnafu)?;
let columns_values = fields
.iter()
.cloned()
.zip(column_chunk.into_iter())
.collect::<HashMap<String, VectorRef>>();
futs.push(table.insert(InsertRequest {
catalog_name: req.catalog_name.to_string(),
schema_name: req.schema_name.to_string(),
table_name: req.table_name.to_string(),
columns_values,
//TODO: support multi-regions
region_number: 0,
}))
}
let result = futures::future::try_join_all(futs)
.await
.context(error::InsertSnafu {
table_name: req.table_name.to_string(),
})?;
Ok(Output::AffectedRows(result.iter().sum()))
}
}


@@ -326,7 +326,8 @@ mod tests {
use common_base::readable_size::ReadableSize;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::Schema;
use query::parser::QueryLanguageParser;
use query::parser::{QueryLanguageParser, QueryStatement};
use query::query_engine::SqlStatementExecutor;
use session::context::QueryContext;
use sql::dialect::GenericDialect;
use sql::parser::ParserContext;
@@ -560,10 +561,10 @@ mod tests {
TIME INDEX (ts),
PRIMARY KEY(host)
) engine=mito with(regions=1);"#;
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
let Ok(QueryStatement::Sql(stmt)) = QueryLanguageParser::parse_sql(sql) else { unreachable!() };
let output = instance
.inner()
.execute_stmt(stmt, QueryContext::arc())
.execute_sql(stmt, QueryContext::arc())
.await
.unwrap();
assert!(matches!(output, Output::AffectedRows(0)));
@@ -577,10 +578,10 @@ mod tests {
TIME INDEX (ts),
PRIMARY KEY(host)
) engine=mito with(regions=1);"#;
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
let Ok(QueryStatement::Sql(stmt)) = QueryLanguageParser::parse_sql(sql) else { unreachable!() };
let output = instance
.inner()
.execute_stmt(stmt, QueryContext::arc())
.execute_sql(stmt, QueryContext::arc())
.await
.unwrap();
assert!(matches!(output, Output::AffectedRows(0)));


@@ -10,6 +10,7 @@ test = []
[dependencies]
arrow.workspace = true
arrow-array.workspace = true
arrow-schema.workspace = true
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }


@@ -362,7 +362,7 @@ mod tests {
ConcreteDataType::String(_)
));
assert_eq!(
ConcreteDataType::from_arrow_type(&ArrowDataType::List(Box::new(Field::new(
ConcreteDataType::from_arrow_type(&ArrowDataType::List(Arc::new(Field::new(
"item",
ArrowDataType::Int32,
true,


@@ -272,7 +272,7 @@ impl TryFrom<Arc<ArrowSchema>> for Schema {
let mut column_schemas = Vec::with_capacity(arrow_schema.fields.len());
let mut name_to_index = HashMap::with_capacity(arrow_schema.fields.len());
for field in &arrow_schema.fields {
let column_schema = ColumnSchema::try_from(field)?;
let column_schema = ColumnSchema::try_from(field.as_ref())?;
name_to_index.insert(field.name().to_string(), column_schemas.len());
column_schemas.push(column_schema);
}


@@ -125,11 +125,12 @@ mod tests {
#[test]
fn test_to_serde_json_value() {
std::env::set_var("TZ", "Asia/Shanghai");
let ts = TimestampSecond::new(123);
let val = serde_json::Value::from(ts);
match val {
serde_json::Value::String(s) => {
assert_eq!("1970-01-01 00:02:03+0000", s);
assert_eq!("1970-01-01 08:02:03+0800", s);
}
_ => unreachable!(),
}


@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use arrow::datatypes::{DataType as ArrowDataType, Field};
use serde::{Deserialize, Serialize};
@@ -63,7 +65,7 @@ impl DataType for ListType {
}
fn as_arrow_type(&self) -> ArrowDataType {
let field = Box::new(Field::new("item", self.item_type.as_arrow_type(), true));
let field = Arc::new(Field::new("item", self.item_type.as_arrow_type(), true));
ArrowDataType::List(field)
}
@@ -94,7 +96,7 @@ mod tests {
t.default_value()
);
assert_eq!(
ArrowDataType::List(Box::new(Field::new("item", ArrowDataType::Boolean, true))),
ArrowDataType::List(Arc::new(Field::new("item", ArrowDataType::Boolean, true))),
t.as_arrow_type()
);
assert_eq!(ConcreteDataType::boolean_datatype(), *t.item_type());
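These datatypes hunks track an arrow-rs API change: list item fields are now passed as Arc<Field> (a FieldRef) rather than Box<Field>. A one-liner sketch of building a list type under that API, using the same calls as the hunk above:

use std::sync::Arc;

use arrow::datatypes::{DataType as ArrowDataType, Field};

fn main() {
    // The item field is wrapped in Arc so it can be shared as a FieldRef.
    let item = Arc::new(Field::new("item", ArrowDataType::Int32, true));
    let list = ArrowDataType::List(item);
    println!("{list:?}");
}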


@@ -15,6 +15,7 @@
use std::cmp::Ordering;
use std::fmt::{Display, Formatter};
use std::str::FromStr;
use std::sync::Arc;
use arrow::datatypes::{DataType as ArrowDataType, Field};
use common_base::bytes::{Bytes, StringBytes};
@@ -271,7 +272,7 @@ fn to_null_value(output_type: &ConcreteDataType) -> ScalarValue {
ConcreteDataType::DateTime(_) => ScalarValue::Date64(None),
ConcreteDataType::Timestamp(t) => timestamp_to_scalar_value(t.unit(), None),
ConcreteDataType::List(_) => {
ScalarValue::List(None, Box::new(new_item_field(output_type.as_arrow_type())))
ScalarValue::List(None, Arc::new(new_item_field(output_type.as_arrow_type())))
}
ConcreteDataType::Dictionary(dict) => ScalarValue::Dictionary(
Box::new(dict.key_type().as_arrow_type()),
@@ -490,7 +491,7 @@ impl ListValue {
Ok(ScalarValue::List(
vs,
Box::new(new_item_field(output_type.item_type().as_arrow_type())),
Arc::new(new_item_field(output_type.item_type().as_arrow_type())),
))
}
}
@@ -1345,6 +1346,7 @@ mod tests {
#[test]
fn test_display() {
std::env::set_var("TZ", "Asia/Shanghai");
assert_eq!(Value::Null.to_string(), "Null");
assert_eq!(Value::UInt8(8).to_string(), "8");
assert_eq!(Value::UInt16(16).to_string(), "16");
@@ -1366,11 +1368,11 @@ mod tests {
assert_eq!(Value::Date(Date::new(0)).to_string(), "1970-01-01");
assert_eq!(
Value::DateTime(DateTime::new(0)).to_string(),
"1970-01-01 00:00:00"
"1970-01-01 08:00:00+0800"
);
assert_eq!(
Value::Timestamp(Timestamp::new(1000, TimeUnit::Millisecond)).to_string(),
"1970-01-01 00:00:01+0000"
"1970-01-01 08:00:01+0800"
);
assert_eq!(
Value::List(ListValue::new(


@@ -217,8 +217,7 @@ macro_rules! impl_try_from_arrow_array_for_vector {
.with_context(|| crate::error::ConversionSnafu {
from: std::format!("{:?}", array.as_ref().data_type()),
})?
.data()
.clone();
.to_data();
let concrete_array = $Array::from(data);
Ok($Vector::from(concrete_array))
@@ -229,7 +228,7 @@ macro_rules! impl_try_from_arrow_array_for_vector {
macro_rules! impl_validity_for_vector {
($array: expr) => {
Validity::from_array_data($array.data())
Validity::from_array_data($array.to_data())
};
}
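This and the vector hunks that follow all apply the same arrow-rs migration: the borrowed Array::data() accessor is replaced by the owning to_data(), and slices are taken on the ArrayData before rebuilding the concrete array. A minimal sketch with an Int32Array, assuming the recent arrow-rs version pulled in by this change:

use arrow::array::{Array, Int32Array};

fn main() {
    let array = Int32Array::from_iter_values([1, 2, 3, 4]);

    // Owned ArrayData, as used by the rewritten to_array_data() helpers.
    let data = array.to_data();
    assert_eq!(4, data.len());

    // Slice the ArrayData and rebuild the concrete array, mirroring slice() above.
    let sliced = Int32Array::from(data.slice(1, 2));
    assert_eq!(2, sliced.len());
    assert_eq!(2, sliced.value(0));
}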


@@ -38,13 +38,7 @@ impl BinaryVector {
}
fn to_array_data(&self) -> ArrayData {
self.array.data().clone()
}
fn from_array_data(data: ArrayData) -> BinaryVector {
BinaryVector {
array: BinaryArray::from(data),
}
self.array.to_data()
}
}
@@ -106,8 +100,8 @@ impl Vector for BinaryVector {
}
fn slice(&self, offset: usize, length: usize) -> VectorRef {
let data = self.array.data().slice(offset, length);
Arc::new(Self::from_array_data(data))
let array = self.array.slice(offset, length);
Arc::new(Self { array })
}
fn get(&self, index: usize) -> Value {
@@ -286,7 +280,7 @@ mod tests {
#[test]
fn test_from_arrow_array() {
let arrow_array = BinaryArray::from_iter_values([vec![1, 2, 3], vec![1, 2, 3]]);
let original = BinaryArray::from(arrow_array.data().clone());
let original = BinaryArray::from(arrow_array.to_data());
let vector = BinaryVector::from(arrow_array);
assert_eq!(original, vector.array);
}


@@ -44,7 +44,7 @@ impl BooleanVector {
}
fn to_array_data(&self) -> ArrayData {
self.array.data().clone()
self.array.to_data()
}
fn from_array_data(data: ArrayData) -> BooleanVector {
@@ -132,7 +132,7 @@ impl Vector for BooleanVector {
}
fn slice(&self, offset: usize, length: usize) -> VectorRef {
let data = self.array.data().slice(offset, length);
let data = self.array.to_data().slice(offset, length);
Arc::new(Self::from_array_data(data))
}
@@ -259,7 +259,7 @@ mod tests {
assert!(!v.is_const());
assert!(v.validity().is_all_valid());
assert!(!v.only_null());
assert_eq!(64, v.memory_size());
assert_eq!(2, v.memory_size());
for (i, b) in bools.iter().enumerate() {
assert!(!v.is_null(i));


@@ -24,7 +24,7 @@ pub type DateVectorBuilder = PrimitiveVectorBuilder<DateType>;
mod tests {
use std::sync::Arc;
use arrow::array::Array;
use arrow_array::ArrayRef;
use common_time::date::Date;
use super::*;
@@ -84,7 +84,7 @@ mod tests {
#[test]
fn test_date_from_arrow() {
let vector = DateVector::from_slice([1, 2]);
let arrow = vector.as_arrow().slice(0, vector.len());
let arrow: ArrayRef = Arc::new(vector.as_arrow().slice(0, vector.len()));
let vector2 = DateVector::try_from_arrow_array(&arrow).unwrap();
assert_eq!(vector, vector2);
}


@@ -25,6 +25,7 @@ mod tests {
use std::sync::Arc;
use arrow::array::{Array, PrimitiveArray};
use arrow_array::ArrayRef;
use common_time::DateTime;
use datafusion_common::from_slice::FromSlice;
@@ -37,6 +38,7 @@ mod tests {
#[test]
fn test_datetime_vector() {
std::env::set_var("TZ", "Asia/Shanghai");
let v = DateTimeVector::new(PrimitiveArray::from_slice([1, 2, 3]));
assert_eq!(ConcreteDataType::datetime_datatype(), v.data_type());
assert_eq!(3, v.len());
@@ -63,7 +65,7 @@ mod tests {
unreachable!()
}
assert_eq!(
"[\"1970-01-01 00:00:01\",\"1970-01-01 00:00:02\",\"1970-01-01 00:00:03\"]",
"[\"1970-01-01 08:00:01+0800\",\"1970-01-01 08:00:02+0800\",\"1970-01-01 08:00:03+0800\"]",
serde_json::to_string(&v.serialize_to_json().unwrap()).unwrap()
);
}
@@ -107,8 +109,8 @@ mod tests {
#[test]
fn test_datetime_from_arrow() {
let vector = DateTimeVector::from_wrapper_slice([DateTime::new(1), DateTime::new(2)]);
let arrow = vector.as_arrow().slice(0, vector.len());
let vector2 = DateTimeVector::try_from_arrow_array(&arrow).unwrap();
let arrow: ArrayRef = Arc::new(vector.as_arrow().slice(0, vector.len())) as _;
let vector2 = DateTimeVector::try_from_arrow_array(arrow).unwrap();
assert_eq!(vector, vector2);
}
}


@@ -238,16 +238,18 @@ impl Helper {
ArrowDataType::Date64 => Arc::new(DateTimeVector::try_from_arrow_array(array)?),
ArrowDataType::List(_) => Arc::new(ListVector::try_from_arrow_array(array)?),
ArrowDataType::Timestamp(unit, _) => match unit {
TimeUnit::Second => Arc::new(TimestampSecondVector::try_from_arrow_array(array)?),
TimeUnit::Millisecond => {
Arc::new(TimestampMillisecondVector::try_from_arrow_array(array)?)
}
TimeUnit::Microsecond => {
Arc::new(TimestampMicrosecondVector::try_from_arrow_array(array)?)
}
TimeUnit::Nanosecond => {
Arc::new(TimestampNanosecondVector::try_from_arrow_array(array)?)
}
TimeUnit::Second => Arc::new(
TimestampSecondVector::try_from_arrow_timestamp_array(array)?,
),
TimeUnit::Millisecond => Arc::new(
TimestampMillisecondVector::try_from_arrow_timestamp_array(array)?,
),
TimeUnit::Microsecond => Arc::new(
TimestampMicrosecondVector::try_from_arrow_timestamp_array(array)?,
),
TimeUnit::Nanosecond => Arc::new(
TimestampNanosecondVector::try_from_arrow_timestamp_array(array)?,
),
},
ArrowDataType::Float16
| ArrowDataType::Time32(_)
@@ -260,7 +262,7 @@ impl Helper {
| ArrowDataType::LargeList(_)
| ArrowDataType::FixedSizeList(_, _)
| ArrowDataType::Struct(_)
| ArrowDataType::Union(_, _, _)
| ArrowDataType::Union(_, _)
| ArrowDataType::Dictionary(_, _)
| ArrowDataType::Decimal128(_, _)
| ArrowDataType::Decimal256(_, _)
@@ -357,7 +359,7 @@ mod tests {
ScalarValue::Int32(Some(1)),
ScalarValue::Int32(Some(2)),
]),
Box::new(Field::new("item", ArrowDataType::Int32, true)),
Arc::new(Field::new("item", ArrowDataType::Int32, true)),
);
let vector = Helper::try_from_scalar_value(value, 3).unwrap();
assert_eq!(


@@ -47,7 +47,7 @@ impl ListVector {
}
fn to_array_data(&self) -> ArrayData {
self.array.data().clone()
self.array.to_data()
}
fn from_array_data_and_type(data: ArrayData, item_type: ConcreteDataType) -> Self {
@@ -106,7 +106,7 @@ impl Vector for ListVector {
}
fn slice(&self, offset: usize, length: usize) -> VectorRef {
let data = self.array.data().slice(offset, length);
let data = self.array.to_data().slice(offset, length);
Arc::new(Self::from_array_data_and_type(data, self.item_type.clone()))
}
@@ -345,7 +345,7 @@ impl ScalarVectorBuilder for ListVectorBuilder {
let len = self.len();
let values_vector = self.values_builder.to_vector();
let values_arr = values_vector.to_arrow_array();
let values_data = values_arr.data();
let values_data = values_arr.to_data();
let offset_buffer = self.offsets_builder.finish();
let null_bit_buffer = self.null_buffer_builder.finish();
@@ -355,7 +355,7 @@ impl ScalarVectorBuilder for ListVectorBuilder {
let array_data_builder = ArrayData::builder(data_type)
.len(len)
.add_buffer(offset_buffer)
.add_child_data(values_data.clone())
.add_child_data(values_data)
.null_bit_buffer(null_bit_buffer);
let array_data = unsafe { array_data_builder.build_unchecked() };


@@ -46,7 +46,7 @@ impl NullVector {
}
fn to_array_data(&self) -> ArrayData {
self.array.data().clone()
self.array.to_data()
}
}


@@ -18,7 +18,10 @@ use std::sync::Arc;
use arrow::array::{
Array, ArrayBuilder, ArrayData, ArrayIter, ArrayRef, PrimitiveArray, PrimitiveBuilder,
TimestampMicrosecondArray, TimestampMillisecondArray, TimestampNanosecondArray,
TimestampSecondArray,
};
use arrow_schema::DataType;
use serde_json::Value as JsonValue;
use snafu::OptionExt;
@@ -64,12 +67,49 @@ impl<T: LogicalPrimitiveType> PrimitiveVector<T> {
.with_context(|| error::ConversionSnafu {
from: format!("{:?}", array.as_ref().data_type()),
})?
.data()
.clone();
.to_data();
let concrete_array = PrimitiveArray::<T::ArrowPrimitive>::from(data);
Ok(Self::new(concrete_array))
}
/// Converts an arrow timestamp array to a vector, ignoring time zone info.
pub fn try_from_arrow_timestamp_array(array: impl AsRef<dyn Array>) -> Result<Self> {
let array = array.as_ref();
let array_data = match array.data_type() {
DataType::Timestamp(unit, _) => match unit {
arrow_schema::TimeUnit::Second => array
.as_any()
.downcast_ref::<TimestampSecondArray>()
.unwrap()
.with_timezone_opt(None::<String>)
.to_data(),
arrow_schema::TimeUnit::Millisecond => array
.as_any()
.downcast_ref::<TimestampMillisecondArray>()
.unwrap()
.with_timezone_opt(None::<String>)
.to_data(),
arrow_schema::TimeUnit::Microsecond => array
.as_any()
.downcast_ref::<TimestampMicrosecondArray>()
.unwrap()
.with_timezone_opt(None::<String>)
.to_data(),
arrow_schema::TimeUnit::Nanosecond => array
.as_any()
.downcast_ref::<TimestampNanosecondArray>()
.unwrap()
.with_timezone_opt(None::<String>)
.to_data(),
},
_ => {
unreachable!()
}
};
let concrete_array = PrimitiveArray::<T::ArrowPrimitive>::from(array_data);
Ok(Self::new(concrete_array))
}
pub fn from_slice<P: AsRef<[T::Native]>>(slice: P) -> Self {
let iter = slice.as_ref().iter().copied();
Self {
@@ -101,7 +141,7 @@ impl<T: LogicalPrimitiveType> PrimitiveVector<T> {
}
fn to_array_data(&self) -> ArrayData {
self.array.data().clone()
self.array.to_data()
}
fn from_array_data(data: ArrayData) -> Self {
@@ -112,7 +152,7 @@ impl<T: LogicalPrimitiveType> PrimitiveVector<T> {
// To distinguish with `Vector::slice()`.
fn get_slice(&self, offset: usize, length: usize) -> Self {
let data = self.array.data().slice(offset, length);
let data = self.array.to_data().slice(offset, length);
Self::from_array_data(data)
}
}
@@ -161,7 +201,7 @@ impl<T: LogicalPrimitiveType> Vector for PrimitiveVector<T> {
}
fn slice(&self, offset: usize, length: usize) -> VectorRef {
let data = self.array.data().slice(offset, length);
let data = self.array.to_data().slice(offset, length);
Arc::new(Self::from_array_data(data))
}
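try_from_arrow_timestamp_array above downcasts to the concrete arrow timestamp array and calls with_timezone_opt(None) so the stored values are kept while the zone annotation is dropped. A short sketch of just that zone-stripping step; the literal +08:00 zone is only an example:

use arrow::array::{Array, TimestampMillisecondArray};
use arrow::datatypes::DataType;

fn main() {
    let with_tz = TimestampMillisecondArray::from(vec![1_000, 2_000])
        .with_timezone_opt(Some("+08:00".to_string()));
    assert!(matches!(with_tz.data_type(), DataType::Timestamp(_, Some(_))));

    // Same values, no time zone annotation: this is what the vector conversion keeps.
    let no_tz = with_tz.with_timezone_opt(None::<String>);
    assert!(matches!(no_tz.data_type(), DataType::Timestamp(_, None)));
    assert_eq!(1_000, no_tz.value(0));
}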


@@ -38,7 +38,7 @@ impl StringVector {
}
fn to_array_data(&self) -> ArrayData {
self.array.data().clone()
self.array.to_data()
}
fn from_array_data(data: ArrayData) -> Self {
@@ -146,7 +146,7 @@ impl Vector for StringVector {
}
fn slice(&self, offset: usize, length: usize) -> VectorRef {
let data = self.array.data().slice(offset, length);
let data = self.array.to_data().slice(offset, length);
Arc::new(Self::from_array_data(data))
}


@@ -16,10 +16,10 @@ use arrow::array::ArrayData;
use arrow::buffer::NullBuffer;
#[derive(Debug, PartialEq)]
enum ValidityKind<'a> {
enum ValidityKind {
/// Whether the array slot is valid or not (null).
Slots {
bitmap: &'a NullBuffer,
bitmap: NullBuffer,
len: usize,
null_count: usize,
},
@@ -31,17 +31,17 @@ enum ValidityKind<'a> {
/// Validity of a vector.
#[derive(Debug, PartialEq)]
pub struct Validity<'a> {
kind: ValidityKind<'a>,
pub struct Validity {
kind: ValidityKind,
}
impl<'a> Validity<'a> {
impl Validity {
/// Creates a `Validity` from [`ArrayData`].
pub fn from_array_data(data: &'a ArrayData) -> Validity<'a> {
pub fn from_array_data(data: ArrayData) -> Validity {
match data.nulls() {
Some(bitmap) => Validity {
Some(null_buf) => Validity {
kind: ValidityKind::Slots {
bitmap,
bitmap: null_buf.clone(),
len: data.len(),
null_count: data.null_count(),
},
@@ -51,14 +51,14 @@ impl<'a> Validity<'a> {
}
/// Returns `Validity` that all elements are valid.
pub fn all_valid(len: usize) -> Validity<'a> {
pub fn all_valid(len: usize) -> Validity {
Validity {
kind: ValidityKind::AllValid { len },
}
}
/// Returns `Validity` that all elements are null.
pub fn all_null(len: usize) -> Validity<'a> {
pub fn all_null(len: usize) -> Validity {
Validity {
kind: ValidityKind::AllNull { len },
}
@@ -66,9 +66,9 @@ impl<'a> Validity<'a> {
/// Returns whether `i-th` bit is set.
pub fn is_set(&self, i: usize) -> bool {
match self.kind {
match &self.kind {
ValidityKind::Slots { bitmap, .. } => bitmap.is_valid(i),
ValidityKind::AllValid { len } => i < len,
ValidityKind::AllValid { len } => i < *len,
ValidityKind::AllNull { .. } => false,
}
}
@@ -136,7 +136,7 @@ mod tests {
#[test]
fn test_from_array_data() {
let array = Int32Array::from_iter([None, Some(1), None]);
let validity = Validity::from_array_data(array.data());
let validity = Validity::from_array_data(array.to_data());
assert_eq!(2, validity.null_count());
assert!(!validity.is_set(0));
assert!(validity.is_set(1));
@@ -145,13 +145,13 @@ mod tests {
assert!(!validity.is_all_valid());
let array = Int32Array::from_iter([None, None]);
let validity = Validity::from_array_data(array.data());
let validity = Validity::from_array_data(array.to_data());
assert!(validity.is_all_null());
assert!(!validity.is_all_valid());
assert_eq!(2, validity.null_count());
let array = Int32Array::from_iter_values([1, 2]);
let validity = Validity::from_array_data(array.data());
let validity = Validity::from_array_data(array.to_data());
assert!(!validity.is_all_null());
assert!(validity.is_all_valid());
assert_eq!(0, validity.null_count());
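The Validity rework above drops the <'a> lifetime by storing a clone of arrow's reference-counted NullBuffer instead of a borrow. A generic sketch of that own-a-cheap-clone refactor; NullBuffer here is a simplified stand-in, not the arrow type:

use std::sync::Arc;

/// Simplified stand-in for a reference-counted null bitmap.
#[derive(Debug, Clone)]
struct NullBuffer(Arc<Vec<bool>>);

struct Validity {
    // Owned clone instead of `&'a NullBuffer`, so no lifetime parameter is needed.
    bitmap: NullBuffer,
}

impl Validity {
    fn from_bitmap(bitmap: &NullBuffer) -> Validity {
        // Cloning only bumps the Arc; the underlying bitmap is shared, not copied.
        Validity {
            bitmap: bitmap.clone(),
        }
    }

    fn is_set(&self, i: usize) -> bool {
        self.bitmap.0.get(i).copied().unwrap_or(false)
    }
}

fn main() {
    let bitmap = NullBuffer(Arc::new(vec![false, true, false]));
    let validity = Validity::from_bitmap(&bitmap);
    assert!(!validity.is_set(0));
    assert!(validity.is_set(1));
}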


@@ -0,0 +1,32 @@
[package]
name = "file-table-engine"
version.workspace = true
edition.workspace = true
license.workspace = true
[features]
default = []
test = ["common-test-util"]
[dependencies]
async-trait = "0.1"
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-procedure = { path = "../common/procedure" }
common-query = { path = "../common/query" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
futures.workspace = true
object-store = { path = "../object-store" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu.workspace = true
storage = { path = "../storage" }
store-api = { path = "../store-api" }
table = { path = "../table" }
common-test-util = { path = "../common/test-util", optional = true }
tokio.workspace = true
[dev-dependencies]
common-test-util = { path = "../common/test-util" }


@@ -0,0 +1,16 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[derive(Debug, Clone, Default)]
pub struct EngineConfig {}


@@ -0,0 +1,21 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod immutable;
#[cfg(test)]
mod tests;
use table::metadata::TableVersion;
const INIT_TABLE_VERSION: TableVersion = 0;


@@ -0,0 +1,400 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use async_trait::async_trait;
use common_catalog::consts::IMMUTABLE_FILE_ENGINE;
use common_error::prelude::BoxedError;
use common_telemetry::{debug, logging};
use datatypes::schema::Schema;
use object_store::ObjectStore;
use snafu::ResultExt;
use table::engine::{table_dir, EngineContext, TableEngine, TableReference};
use table::error::TableOperationSnafu;
use table::metadata::{TableInfo, TableInfoBuilder, TableMetaBuilder, TableType};
use table::requests::{AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest};
use table::{error as table_error, Result as TableResult, Table, TableRef};
use tokio::sync::Mutex;
use crate::config::EngineConfig;
use crate::engine::INIT_TABLE_VERSION;
use crate::error::{
BuildTableInfoSnafu, BuildTableMetaSnafu, DropTableSnafu, InvalidRawSchemaSnafu, Result,
TableExistsSnafu,
};
use crate::manifest::immutable::{delete_table_manifest, ImmutableMetadata};
use crate::manifest::table_manifest_dir;
use crate::table::immutable::{ImmutableFileTable, ImmutableFileTableRef};
/// [TableEngine] implementation.
#[derive(Clone)]
pub struct ImmutableFileTableEngine {
inner: Arc<EngineInner>,
}
#[async_trait]
impl TableEngine for ImmutableFileTableEngine {
fn name(&self) -> &str {
IMMUTABLE_FILE_ENGINE
}
async fn create_table(
&self,
ctx: &EngineContext,
request: CreateTableRequest,
) -> TableResult<TableRef> {
self.inner
.create_table(ctx, request)
.await
.map_err(BoxedError::new)
.context(table_error::TableOperationSnafu)
}
async fn open_table(
&self,
ctx: &EngineContext,
request: OpenTableRequest,
) -> TableResult<Option<TableRef>> {
self.inner
.open_table(ctx, request)
.await
.map_err(BoxedError::new)
.context(table_error::TableOperationSnafu)
}
async fn alter_table(
&self,
_ctx: &EngineContext,
_req: AlterTableRequest,
) -> TableResult<TableRef> {
table_error::UnsupportedSnafu {
operation: "ALTER TABLE",
}
.fail()
}
fn get_table(
&self,
_ctx: &EngineContext,
table_ref: &TableReference,
) -> TableResult<Option<TableRef>> {
Ok(self.inner.get_table(table_ref))
}
fn table_exists(&self, _ctx: &EngineContext, table_ref: &TableReference) -> bool {
self.inner.get_table(table_ref).is_some()
}
async fn drop_table(
&self,
_ctx: &EngineContext,
request: DropTableRequest,
) -> TableResult<bool> {
self.inner
.drop_table(request)
.await
.map_err(BoxedError::new)
.context(table_error::TableOperationSnafu)
}
async fn close(&self) -> TableResult<()> {
self.inner.close().await
}
}
#[cfg(test)]
impl ImmutableFileTableEngine {
pub async fn close_table(&self, table_ref: &TableReference<'_>) -> TableResult<()> {
self.inner.close_table(table_ref).await
}
}
impl ImmutableFileTableEngine {
pub fn new(config: EngineConfig, object_store: ObjectStore) -> Self {
ImmutableFileTableEngine {
inner: Arc::new(EngineInner::new(config, object_store)),
}
}
}
struct EngineInner {
/// All tables opened by the engine. The map key is the formatted [TableReference].
///
/// Writing to `tables` should also hold the `table_mutex`.
tables: RwLock<HashMap<String, ImmutableFileTableRef>>,
object_store: ObjectStore,
/// Table mutex is used to protect the operations such as creating/opening/closing
/// a table, to avoid things like opening the same table simultaneously.
table_mutex: Mutex<()>,
}
impl EngineInner {
pub fn new(_config: EngineConfig, object_store: ObjectStore) -> Self {
EngineInner {
tables: RwLock::new(HashMap::default()),
object_store,
table_mutex: Mutex::new(()),
}
}
async fn create_table(
&self,
_ctx: &EngineContext,
request: CreateTableRequest,
) -> Result<TableRef> {
let CreateTableRequest {
catalog_name,
schema_name,
table_name,
create_if_not_exists,
table_options,
..
} = request;
let table_ref = TableReference {
catalog: &catalog_name,
schema: &schema_name,
table: &table_name,
};
if let Some(table) = self.get_table(&table_ref) {
return if create_if_not_exists {
Ok(table)
} else {
TableExistsSnafu { table_name }.fail()
};
}
let table_schema =
Arc::new(Schema::try_from(request.schema).context(InvalidRawSchemaSnafu)?);
let table_id = request.id;
let table_dir = table_dir(&catalog_name, &schema_name, table_id);
let table_full_name = table_ref.to_string();
let _lock = self.table_mutex.lock().await;
// Checks again, read lock should be enough since we are guarded by the mutex.
if let Some(table) = self.get_table_by_full_name(&table_full_name) {
return if request.create_if_not_exists {
Ok(table)
} else {
TableExistsSnafu { table_name }.fail()
};
}
let table_meta = TableMetaBuilder::new_external_table()
.schema(table_schema)
.engine(IMMUTABLE_FILE_ENGINE)
.options(table_options)
.build()
.context(BuildTableMetaSnafu {
table_name: &table_full_name,
})?;
let table_info = TableInfoBuilder::new(&table_name, table_meta)
.ident(table_id)
.table_version(INIT_TABLE_VERSION)
.table_type(TableType::Base)
.catalog_name(catalog_name.to_string())
.schema_name(schema_name.to_string())
.desc(request.desc)
.build()
.context(BuildTableInfoSnafu {
table_name: &table_full_name,
})?;
let table = Arc::new(
ImmutableFileTable::create(
&table_full_name,
&table_dir,
table_info,
self.object_store.clone(),
)
.await?,
);
logging::info!(
"Immutable file engine created table: {} in schema: {}, table_id: {}.",
table_name,
schema_name,
table_id
);
self.tables
.write()
.unwrap()
.insert(table_full_name, table.clone());
Ok(table)
}
fn get_table_by_full_name(&self, full_name: &str) -> Option<TableRef> {
self.tables
.read()
.unwrap()
.get(full_name)
.cloned()
.map(|table| table as _)
}
fn get_table(&self, table_ref: &TableReference) -> Option<TableRef> {
self.get_table_by_full_name(&table_ref.to_string())
}
async fn open_table(
&self,
_ctx: &EngineContext,
request: OpenTableRequest,
) -> TableResult<Option<TableRef>> {
let OpenTableRequest {
catalog_name,
schema_name,
table_name,
..
} = request;
let table_ref = TableReference {
catalog: &catalog_name,
schema: &schema_name,
table: &table_name,
};
let table_full_name = table_ref.to_string();
if let Some(table) = self.get_table_by_full_name(&table_full_name) {
return Ok(Some(table));
}
let table = {
let _lock = self.table_mutex.lock().await;
// Checks again, read lock should be enough since we are guarded by the mutex.
if let Some(table) = self.get_table_by_full_name(&table_full_name) {
return Ok(Some(table));
}
let table_id = request.table_id;
let table_dir = table_dir(&catalog_name, &schema_name, table_id);
let (metadata, table_info) = self
.recover_table_manifest_and_info(&table_full_name, &table_dir)
.await
.map_err(BoxedError::new)
.context(TableOperationSnafu)?;
debug!(
"Opening table {}, table info recovered: {:?}",
table_id, table_info
);
let table = Arc::new(ImmutableFileTable::new(table_info, metadata));
self.tables
.write()
.unwrap()
.insert(table_full_name, table.clone());
Some(table as _)
};
logging::info!(
"Immutable file engine opened table: {} in schema: {}",
table_name,
schema_name
);
Ok(table)
}
async fn drop_table(&self, req: DropTableRequest) -> Result<bool> {
let table_ref = TableReference {
catalog: &req.catalog_name,
schema: &req.schema_name,
table: &req.table_name,
};
let table_full_name = table_ref.to_string();
let _lock = self.table_mutex.lock().await;
if let Some(table) = self.get_table_by_full_name(&table_full_name) {
let table_id = table.table_info().ident.table_id;
let table_dir = table_dir(&req.catalog_name, &req.schema_name, table_id);
delete_table_manifest(
&table_full_name,
&table_manifest_dir(&table_dir),
&self.object_store,
)
.await
.map_err(BoxedError::new)
.context(DropTableSnafu {
table_name: &table_full_name,
})?;
self.tables.write().unwrap().remove(&table_full_name);
Ok(true)
} else {
Ok(false)
}
}
async fn close(&self) -> TableResult<()> {
let _lock = self.table_mutex.lock().await;
let tables = self.tables.read().unwrap().clone();
futures::future::try_join_all(tables.values().map(|t| t.close()))
.await
.map_err(BoxedError::new)
.context(table_error::TableOperationSnafu)?;
        // Release all closed tables.
self.tables.write().unwrap().clear();
Ok(())
}
async fn recover_table_manifest_and_info(
&self,
table_name: &str,
table_dir: &str,
) -> Result<(ImmutableMetadata, TableInfo)> {
ImmutableFileTable::recover_table_info(
table_name,
&table_manifest_dir(table_dir),
&self.object_store,
)
.await
}
}
#[cfg(test)]
impl EngineInner {
pub async fn close_table(&self, table_ref: &TableReference<'_>) -> TableResult<()> {
let full_name = table_ref.to_string();
let _lock = self.table_mutex.lock().await;
if let Some(table) = self.get_table_by_full_name(&full_name) {
table
.close()
.await
.map_err(BoxedError::new)
.context(table_error::TableOperationSnafu)?;
}
self.tables.write().unwrap().remove(&full_name);
Ok(())
}
}
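
Note on the locking scheme above: create_table, open_table and drop_table all follow the same double-checked pattern — look the table up in the RwLock-guarded map without the mutex, then take table_mutex and look it up again before touching the object store. Below is a minimal, self-contained sketch of that pattern, not part of this diff; Registry and expensive_open are hypothetical stand-ins for EngineInner and manifest recovery, and a tokio runtime (features "rt" and "macros") is assumed.

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

use tokio::sync::Mutex;

struct Registry {
    // Mirrors EngineInner::tables and EngineInner::table_mutex above.
    tables: RwLock<HashMap<String, Arc<String>>>,
    table_mutex: Mutex<()>,
}

impl Registry {
    fn get(&self, name: &str) -> Option<Arc<String>> {
        self.tables.read().unwrap().get(name).cloned()
    }

    async fn open(&self, name: &str) -> Arc<String> {
        // Fast path: a plain read of the map, no mutex taken.
        if let Some(t) = self.get(name) {
            return t;
        }
        // Slow path: serialize concurrent opens/creates of the same table.
        let _lock = self.table_mutex.lock().await;
        // Check again: another task may have inserted the table while we waited.
        if let Some(t) = self.get(name) {
            return t;
        }
        let table = Arc::new(expensive_open(name).await);
        self.tables
            .write()
            .unwrap()
            .insert(name.to_string(), table.clone());
        table
    }
}

// Hypothetical stand-in for recover_table_manifest_and_info / ImmutableFileTable::create.
async fn expensive_open(name: &str) -> String {
    format!("table:{name}")
}

#[tokio::main]
async fn main() {
    let registry = Registry {
        tables: RwLock::new(HashMap::new()),
        table_mutex: Mutex::new(()),
    };
    let opened = registry.open("demo").await;
    assert_eq!(opened.as_str(), "table:demo");
}

The second lookup under the mutex is what keeps two concurrent open_table calls from both recovering the manifest and racing on the insert.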


@@ -0,0 +1,189 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::assert_matches::assert_matches;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, IMMUTABLE_FILE_ENGINE};
use table::engine::{EngineContext, TableEngine, TableReference};
use table::requests::{AlterKind, AlterTableRequest, DropTableRequest, OpenTableRequest};
use table::{error as table_error, Table};
use crate::manifest::immutable::manifest_path;
use crate::table::immutable::ImmutableFileTable;
use crate::test_util::{self, TestEngineComponents, TEST_TABLE_NAME};
#[tokio::test]
async fn test_get_table() {
let TestEngineComponents {
table_engine,
table_ref: table,
dir: _dir,
..
} = test_util::setup_test_engine_and_table("test_get_table").await;
let table_info = table.table_info();
let table_ref = TableReference {
catalog: &table_info.catalog_name,
schema: &table_info.schema_name,
table: &table_info.name,
};
let got = table_engine
.get_table(&EngineContext::default(), &table_ref)
.unwrap()
.unwrap();
assert_eq!(table.schema(), got.schema());
}
#[tokio::test]
async fn test_open_table() {
common_telemetry::init_default_ut_logging();
let ctx = EngineContext::default();
let open_req = OpenTableRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: test_util::TEST_TABLE_NAME.to_string(),
// the test table id is 1
table_id: 1,
};
let table_ref = TableReference {
catalog: DEFAULT_CATALOG_NAME,
schema: DEFAULT_SCHEMA_NAME,
table: test_util::TEST_TABLE_NAME,
};
let TestEngineComponents {
table_engine,
table_ref: table,
dir: _dir,
..
} = test_util::setup_test_engine_and_table("test_open_table").await;
assert_eq!(IMMUTABLE_FILE_ENGINE, table_engine.name());
table_engine.close_table(&table_ref).await.unwrap();
let reopened = table_engine
.open_table(&ctx, open_req.clone())
.await
.unwrap()
.unwrap();
let reopened = reopened
.as_any()
.downcast_ref::<ImmutableFileTable>()
.unwrap();
let left = table.table_info();
let right = reopened.table_info();
// assert recovered table_info is correct
assert_eq!(left, right);
}
#[tokio::test]
async fn test_close_all_table() {
common_telemetry::init_default_ut_logging();
let table_ref = TableReference {
catalog: DEFAULT_CATALOG_NAME,
schema: DEFAULT_SCHEMA_NAME,
table: test_util::TEST_TABLE_NAME,
};
let TestEngineComponents {
table_engine,
dir: _dir,
..
} = test_util::setup_test_engine_and_table("test_close_all_table").await;
table_engine.close().await.unwrap();
let exist = table_engine.table_exists(&EngineContext::default(), &table_ref);
assert!(!exist);
}
#[tokio::test]
async fn test_alter_table() {
common_telemetry::init_default_ut_logging();
let TestEngineComponents {
table_engine,
dir: _dir,
..
} = test_util::setup_test_engine_and_table("test_alter_table").await;
let alter_req = AlterTableRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: TEST_TABLE_NAME.to_string(),
alter_kind: AlterKind::RenameTable {
new_table_name: "foo".to_string(),
},
};
let unsupported = table_engine
.alter_table(&EngineContext::default(), alter_req)
.await
.err()
.unwrap();
assert_matches!(unsupported, table_error::Error::Unsupported { .. })
}
#[tokio::test]
async fn test_drop_table() {
common_telemetry::init_default_ut_logging();
let drop_req = DropTableRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: TEST_TABLE_NAME.to_string(),
};
let TestEngineComponents {
table_engine,
object_store,
dir: _dir,
table_dir,
table_ref: table,
..
} = test_util::setup_test_engine_and_table("test_drop_table").await;
let table_info = table.table_info();
let table_ref = TableReference {
catalog: &table_info.catalog_name,
schema: &table_info.schema_name,
table: &table_info.name,
};
let dropped = table_engine
.drop_table(&EngineContext::default(), drop_req)
.await
.unwrap();
assert!(dropped);
let exist = table_engine.table_exists(&EngineContext::default(), &table_ref);
assert!(!exist);
    // check that the manifest under table_dir was removed
let exist = object_store
.is_exist(&manifest_path(&table_dir))
.await
.unwrap();
assert!(!exist);
}


@@ -0,0 +1,161 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use common_error::prelude::*;
use serde_json::error::Error as JsonError;
use snafu::Location;
use table::metadata::{TableInfoBuilderError, TableMetaBuilderError};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Failed to check object from path: {}, source: {}", path, source))]
CheckObject {
path: String,
location: Location,
source: object_store::Error,
},
#[snafu(display("Fail to encode object into json, source: {}", source))]
EncodeJson {
location: Location,
source: JsonError,
},
#[snafu(display("Fail to decode object from json, source: {}", source))]
DecodeJson {
location: Location,
source: JsonError,
},
#[snafu(display("Failed to drop table, table: {}, source: {}", table_name, source))]
DropTable {
source: BoxedError,
table_name: String,
location: Location,
},
#[snafu(display(
"Failed to write table manifest, table: {}, source: {}",
table_name,
source,
))]
WriteTableManifest {
source: object_store::Error,
table_name: String,
location: Location,
},
#[snafu(display("Failed to write immutable manifest, path: {}", path))]
WriteImmutableManifest { path: String, location: Location },
#[snafu(display("Failed to delete table table manifest, source: {}", source,))]
DeleteTableManifest {
source: object_store::Error,
table_name: String,
location: Location,
},
#[snafu(display(
"Failed to read table manifest, table: {}, source: {}",
table_name,
source,
))]
ReadTableManifest {
source: object_store::Error,
table_name: String,
location: Location,
},
#[snafu(display(
"Failed to build table meta for table: {}, source: {}",
table_name,
source
))]
BuildTableMeta {
source: TableMetaBuilderError,
table_name: String,
location: Location,
},
#[snafu(display(
"Failed to build table info for table: {}, source: {}",
table_name,
source
))]
BuildTableInfo {
source: TableInfoBuilderError,
table_name: String,
location: Location,
},
#[snafu(display("Table already exists: {}", table_name))]
TableExists {
location: Location,
table_name: String,
},
#[snafu(display(
"Failed to convert metadata from deserialized data, source: {}",
source
))]
ConvertRaw {
#[snafu(backtrace)]
source: table::metadata::ConvertError,
},
#[snafu(display("Invalid schema, source: {}", source))]
InvalidRawSchema {
#[snafu(backtrace)]
source: datatypes::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
use Error::*;
match self {
TableExists { .. }
| BuildTableMeta { .. }
| BuildTableInfo { .. }
| InvalidRawSchema { .. } => StatusCode::InvalidArguments,
WriteTableManifest { .. }
| DeleteTableManifest { .. }
| ReadTableManifest { .. }
| CheckObject { .. } => StatusCode::StorageUnavailable,
EncodeJson { .. }
| DecodeJson { .. }
| ConvertRaw { .. }
| DropTable { .. }
| WriteImmutableManifest { .. } => StatusCode::Unexpected,
}
}
fn as_any(&self) -> &dyn Any {
self
}
}
impl From<Error> for common_procedure::Error {
fn from(e: Error) -> common_procedure::Error {
common_procedure::Error::from_error_ext(e)
}
}
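
For readers unfamiliar with snafu: the *Snafu context selectors used throughout the engine code above (TableExistsSnafu { table_name }.fail(), .context(BuildTableMetaSnafu { .. }), ensure!(..)) are generated from the error enum. A reduced sketch follows, not part of this diff: the two-variant enum is hypothetical, and the snafu 0.7 and serde_json crates are assumed as dependencies.

use snafu::{ensure, ResultExt, Snafu};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    #[snafu(display("Table already exists: {}", table_name))]
    TableExists { table_name: String },

    #[snafu(display("Failed to decode object from json, source: {}", source))]
    DecodeJson { source: serde_json::Error },
}

pub type Result<T> = std::result::Result<T, Error>;

fn create(name: &str, exists: bool) -> Result<()> {
    // ensure! and .fail() both build the error through the generated selector.
    ensure!(!exists, TableExistsSnafu { table_name: name });
    Ok(())
}

fn decode(bytes: &[u8]) -> Result<u32> {
    // ResultExt::context wraps the serde_json error as the variant's source.
    serde_json::from_slice(bytes).context(DecodeJsonSnafu)
}

fn main() {
    assert!(create("demo", true).is_err());
    assert_eq!(decode(b"42").unwrap(), 42);
}

The real error.rs then maps each variant to a StatusCode in ErrorExt::status_code, as shown above.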


@@ -0,0 +1,23 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(assert_matches)]
pub mod config;
pub mod engine;
pub mod error;
pub mod manifest;
pub mod table;
#[cfg(any(test, feature = "test"))]
pub(crate) mod test_util;


@@ -0,0 +1,20 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod immutable;
#[inline]
pub fn table_manifest_dir(table_dir: &str) -> String {
format!("{table_dir}/manifest/")
}


@@ -0,0 +1,192 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use object_store::ObjectStore;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
use table::metadata::RawTableInfo;
use crate::error::{
CheckObjectSnafu, DecodeJsonSnafu, DeleteTableManifestSnafu, EncodeJsonSnafu,
ReadTableManifestSnafu, Result, WriteImmutableManifestSnafu, WriteTableManifestSnafu,
};
pub type MetadataVersion = u32;
pub const INIT_META_VERSION: MetadataVersion = 0;
const IMMUTABLE_MANIFEST_FILE: &str = "_immutable_manifest";
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub struct ImmutableMetadata {
pub table_info: RawTableInfo,
pub version: MetadataVersion,
}
fn encode_metadata(item: &ImmutableMetadata) -> Result<Vec<u8>> {
serde_json::to_vec(&item).context(EncodeJsonSnafu)
}
fn decode_metadata(src: &[u8]) -> Result<ImmutableMetadata> {
serde_json::from_slice(src).context(DecodeJsonSnafu)
}
pub fn manifest_path(dir: &str) -> String {
format!("{}{}", dir, IMMUTABLE_MANIFEST_FILE)
}
pub(crate) async fn delete_table_manifest(
table_name: &str,
dir: &str,
object_store: &ObjectStore,
) -> Result<()> {
object_store
.delete(&manifest_path(dir))
.await
.context(DeleteTableManifestSnafu { table_name })
}
pub(crate) async fn write_table_manifest(
table_name: &str,
dir: &str,
object_store: &ObjectStore,
metadata: &ImmutableMetadata,
) -> Result<()> {
let path = &manifest_path(dir);
let exist = object_store
.is_exist(path)
.await
.context(CheckObjectSnafu { path })?;
ensure!(!exist, WriteImmutableManifestSnafu { path });
let bs = encode_metadata(metadata)?;
object_store
.write(path, bs)
.await
.context(WriteTableManifestSnafu { table_name })
}
pub(crate) async fn read_table_manifest(
table_name: &str,
dir: &str,
object_store: &ObjectStore,
) -> Result<ImmutableMetadata> {
let path = manifest_path(dir);
let bs = object_store
.read(&path)
.await
.context(ReadTableManifestSnafu { table_name })?;
decode_metadata(&bs)
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use super::*;
use crate::error::Error;
use crate::manifest::table_manifest_dir;
use crate::test_util::{build_test_table_metadata, new_test_object_store, TEST_TABLE_NAME};
#[tokio::test]
async fn test_write_table_manifest() {
let (_dir, store) = new_test_object_store("test_write_table_manifest");
let metadata = build_test_table_metadata();
write_table_manifest(
TEST_TABLE_NAME,
&table_manifest_dir(TEST_TABLE_NAME),
&store,
&metadata,
)
.await
.unwrap();
// try to overwrite immutable manifest
let write_immutable = write_table_manifest(
TEST_TABLE_NAME,
&table_manifest_dir(TEST_TABLE_NAME),
&store,
&metadata,
)
.await
.unwrap_err();
assert_matches!(write_immutable, Error::WriteImmutableManifest { .. })
}
#[tokio::test]
async fn test_read_table_manifest() {
let (_dir, store) = new_test_object_store("test_read_table_manifest");
let metadata = build_test_table_metadata();
write_table_manifest(
TEST_TABLE_NAME,
&table_manifest_dir(TEST_TABLE_NAME),
&store,
&metadata,
)
.await
.unwrap();
let read = read_table_manifest(
TEST_TABLE_NAME,
&table_manifest_dir(TEST_TABLE_NAME),
&store,
)
.await
.unwrap();
assert_eq!(read, metadata);
}
#[tokio::test]
async fn test_read_non_exist_table_manifest() {
let (_dir, store) = new_test_object_store("test_read_non_exist_table_manifest");
        let not_found = read_table_manifest(
TEST_TABLE_NAME,
&table_manifest_dir(TEST_TABLE_NAME),
&store,
)
.await
.unwrap_err();
        assert_matches!(not_found, Error::ReadTableManifest { .. })
}
#[tokio::test]
async fn test_delete_table_manifest() {
let (_dir, store) = new_test_object_store("test_delete_table_manifest");
let metadata = build_test_table_metadata();
let table_dir = &table_manifest_dir(TEST_TABLE_NAME);
write_table_manifest(TEST_TABLE_NAME, table_dir, &store, &metadata)
.await
.unwrap();
let exist = store.is_exist(&manifest_path(table_dir)).await.unwrap();
assert!(exist);
delete_table_manifest(TEST_TABLE_NAME, table_dir, &store)
.await
.unwrap();
let exist = store.is_exist(&manifest_path(table_dir)).await.unwrap();
assert!(!exist);
}
}
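
Putting table_manifest_dir (manifest/mod.rs above) and manifest_path together, a table's manifest lives at <table_dir>/manifest/_immutable_manifest. A tiny illustrative sketch, not part of this diff; "greptime/public/1024" is a made-up table_dir value, the real one comes from table::engine::table_dir.

fn table_manifest_dir(table_dir: &str) -> String {
    format!("{table_dir}/manifest/")
}

fn manifest_path(dir: &str) -> String {
    format!("{}{}", dir, "_immutable_manifest")
}

fn main() {
    let table_dir = "greptime/public/1024";
    assert_eq!(
        manifest_path(&table_manifest_dir(table_dir)),
        "greptime/public/1024/manifest/_immutable_manifest"
    );
}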


@@ -0,0 +1,15 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod immutable;


@@ -0,0 +1,130 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::sync::Arc;
use async_trait::async_trait;
use common_query::physical_plan::PhysicalPlanRef;
use common_query::prelude::Expr;
use datatypes::schema::SchemaRef;
use object_store::ObjectStore;
use snafu::ResultExt;
use store_api::storage::RegionNumber;
use table::error::Result as TableResult;
use table::metadata::{RawTableInfo, TableInfo, TableInfoRef, TableType};
use table::Table;
use crate::error::{ConvertRawSnafu, Result};
use crate::manifest::immutable::{
read_table_manifest, write_table_manifest, ImmutableMetadata, INIT_META_VERSION,
};
use crate::manifest::table_manifest_dir;
pub struct ImmutableFileTable {
metadata: ImmutableMetadata,
    // The table info never changes after construction; the table is immutable.
table_info: Arc<TableInfo>,
}
pub type ImmutableFileTableRef = Arc<ImmutableFileTable>;
#[async_trait]
impl Table for ImmutableFileTable {
fn as_any(&self) -> &dyn Any {
self
}
fn schema(&self) -> SchemaRef {
self.table_info().meta.schema.clone()
}
fn table_type(&self) -> TableType {
self.table_info().table_type
}
fn table_info(&self) -> TableInfoRef {
self.table_info.clone()
}
async fn scan(
&self,
_projection: Option<&Vec<usize>>,
_filters: &[Expr],
_limit: Option<usize>,
) -> TableResult<PhysicalPlanRef> {
todo!()
}
async fn flush(
&self,
_region_number: Option<RegionNumber>,
_wait: Option<bool>,
) -> TableResult<()> {
// nothing to flush
Ok(())
}
async fn close(&self) -> TableResult<()> {
Ok(())
}
}
impl ImmutableFileTable {
#[inline]
pub fn metadata(&self) -> &ImmutableMetadata {
&self.metadata
}
pub(crate) fn new(table_info: TableInfo, metadata: ImmutableMetadata) -> Self {
Self {
metadata,
table_info: Arc::new(table_info),
}
}
pub async fn create(
table_name: &str,
table_dir: &str,
table_info: TableInfo,
object_store: ObjectStore,
) -> Result<ImmutableFileTable> {
let metadata = ImmutableMetadata {
table_info: RawTableInfo::from(table_info.clone()),
version: INIT_META_VERSION,
};
write_table_manifest(
table_name,
&table_manifest_dir(table_dir),
&object_store,
&metadata,
)
.await?;
Ok(ImmutableFileTable::new(table_info, metadata))
}
pub(crate) async fn recover_table_info(
table_name: &str,
table_dir: &str,
object_store: &ObjectStore,
) -> Result<(ImmutableMetadata, TableInfo)> {
let metadata = read_table_manifest(table_name, table_dir, object_store).await?;
let table_info =
TableInfo::try_from(metadata.table_info.clone()).context(ConvertRawSnafu)?;
Ok((metadata, table_info))
}
}
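
The create/recover pair above is the whole lifecycle of an immutable table's metadata: create serializes the table info into a write-once manifest, recover_table_info reads it back and rebuilds TableInfo. A reduced sketch of that lifecycle follows, not part of this diff: plain std::fs stands in for ObjectStore, a hypothetical Info struct stands in for RawTableInfo, and serde/serde_json are assumed as dependencies.

use std::fs;
use std::io;
use std::path::Path;

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
struct Info {
    name: String,
    version: u32,
}

fn create(dir: &Path, info: &Info) -> io::Result<()> {
    fs::create_dir_all(dir)?;
    let path = dir.join("_immutable_manifest");
    // Mirrors the write-once guard in write_table_manifest: creating twice is an error.
    if path.exists() {
        return Err(io::Error::new(io::ErrorKind::AlreadyExists, "manifest exists"));
    }
    let bytes = serde_json::to_vec(info)
        .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
    fs::write(path, bytes)
}

fn recover(dir: &Path) -> io::Result<Info> {
    let bytes = fs::read(dir.join("_immutable_manifest"))?;
    serde_json::from_slice(&bytes).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
}

fn main() -> io::Result<()> {
    let dir = std::env::temp_dir().join("immutable_table_sketch");
    let _ = fs::remove_dir_all(&dir);
    let info = Info { name: "demo".to_string(), version: 0 };
    create(&dir, &info)?;
    assert_eq!(recover(&dir)?, info);
    // A second create must fail: the manifest, like the table, is immutable.
    assert!(create(&dir, &info).is_err());
    Ok(())
}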


@@ -0,0 +1,144 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use common_catalog::consts::IMMUTABLE_FILE_ENGINE;
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema, Schema, SchemaBuilder, SchemaRef};
use object_store::services::Fs;
use object_store::ObjectStore;
use table::engine::{table_dir, EngineContext, TableEngine};
use table::metadata::{RawTableInfo, TableInfo, TableInfoBuilder, TableMetaBuilder, TableType};
use table::requests::{CreateTableRequest, TableOptions};
use table::TableRef;
use crate::config::EngineConfig;
use crate::engine::immutable::ImmutableFileTableEngine;
use crate::manifest::immutable::ImmutableMetadata;
pub const TEST_TABLE_NAME: &str = "demo";
pub fn new_test_object_store(prefix: &str) -> (TempDir, ObjectStore) {
let dir = create_temp_dir(prefix);
let store_dir = dir.path().to_string_lossy();
let mut builder = Fs::default();
builder.root(&store_dir);
(dir, ObjectStore::new(builder).unwrap().finish())
}
pub fn test_schema() -> Schema {
let column_schemas = vec![
ColumnSchema::new("host", ConcreteDataType::string_datatype(), false),
// Nullable value column: cpu
ColumnSchema::new("cpu", ConcreteDataType::float64_datatype(), true),
// Non-null value column: memory
ColumnSchema::new("memory", ConcreteDataType::float64_datatype(), false),
ColumnSchema::new(
"ts",
ConcreteDataType::timestamp_datatype(common_time::timestamp::TimeUnit::Millisecond),
true,
)
.with_time_index(true),
];
SchemaBuilder::try_from(column_schemas)
.unwrap()
.build()
.expect("ts must be timestamp column")
}
pub fn build_test_table_info() -> TableInfo {
let schema = test_schema();
let table_meta = TableMetaBuilder::new_external_table()
.schema(Arc::new(schema))
.engine(IMMUTABLE_FILE_ENGINE)
.build()
.unwrap();
TableInfoBuilder::new(TEST_TABLE_NAME, table_meta)
.table_version(0)
.table_type(TableType::Base)
.catalog_name("greptime".to_string())
.schema_name("public".to_string())
.build()
.unwrap()
}
pub fn build_test_table_metadata() -> ImmutableMetadata {
let table_info = build_test_table_info();
ImmutableMetadata {
table_info: RawTableInfo::from(table_info),
version: 0,
}
}
pub struct TestEngineComponents {
pub table_engine: ImmutableFileTableEngine,
pub table_ref: TableRef,
pub schema_ref: SchemaRef,
pub object_store: ObjectStore,
pub table_dir: String,
pub dir: TempDir,
}
pub fn new_create_request(schema: SchemaRef) -> CreateTableRequest {
CreateTableRequest {
id: 1,
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: TEST_TABLE_NAME.to_string(),
desc: Some("a test table".to_string()),
schema: RawSchema::from(&*schema),
region_numbers: vec![0],
create_if_not_exists: true,
primary_key_indices: vec![0],
table_options: TableOptions::default(),
engine: IMMUTABLE_FILE_ENGINE.to_string(),
}
}
pub async fn setup_test_engine_and_table(prefix: &str) -> TestEngineComponents {
let (dir, object_store) = new_test_object_store(prefix);
let table_engine = ImmutableFileTableEngine::new(EngineConfig::default(), object_store.clone());
let schema_ref = Arc::new(test_schema());
let table_ref = table_engine
.create_table(
&EngineContext::default(),
new_create_request(schema_ref.clone()),
)
.await
.unwrap();
let table_info = table_ref.table_info();
let table_dir = table_dir(
&table_info.catalog_name,
&table_info.schema_name,
table_info.ident.table_id,
);
TestEngineComponents {
table_engine,
table_ref,
schema_ref,
object_store,
table_dir,
dir,
}
}


@@ -10,6 +10,7 @@ python = ["dep:script"]
[dependencies]
api = { path = "../api" }
async-compat = "0.2"
async-stream.workspace = true
async-trait = "0.1"
catalog = { path = "../catalog" }
@@ -17,6 +18,7 @@ chrono.workspace = true
client = { path = "../client" }
common-base = { path = "../common/base" }
common-catalog = { path = "../common/catalog" }
common-datasource = { path = "../common/datasource" }
common-error = { path = "../common/error" }
common-function = { path = "../common/function" }
common-grpc = { path = "../common/grpc" }
@@ -36,11 +38,12 @@ itertools = "0.10"
meta-client = { path = "../meta-client" }
mito = { path = "../mito", features = ["test"] }
moka = { version = "0.9", features = ["future"] }
object-store = { path = "../object-store" }
openmetrics-parser = "0.4"
partition = { path = "../partition" }
prost.workspace = true
query = { path = "../query" }
rustls = "0.20"
regex = "1.6"
script = { path = "../script", features = ["python"], optional = true }
serde = "1.0"
serde_json = "1.0"
@@ -48,6 +51,7 @@ servers = { path = "../servers" }
session = { path = "../session" }
snafu.workspace = true
sql = { path = "../sql" }
storage = { path = "../storage" }
store-api = { path = "../store-api" }
substrait = { path = "../common/substrait" }
table = { path = "../table" }


@@ -32,12 +32,14 @@ use catalog::{
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
SchemaProvider, SchemaProviderRef,
};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::BoxedError;
use common_telemetry::error;
use futures::StreamExt;
use meta_client::rpc::TableName;
use partition::manager::PartitionRuleManagerRef;
use snafu::prelude::*;
use table::table::numbers::NumbersTable;
use table::TableRef;
use crate::datanode::DatanodeClients;
@@ -373,17 +375,21 @@ impl SchemaProvider for FrontendSchemaProvider {
std::thread::spawn(|| {
common_runtime::block_on_read(async move {
let mut tables = vec![];
if catalog_name == DEFAULT_CATALOG_NAME && schema_name == DEFAULT_SCHEMA_NAME {
tables.push("numbers".to_string());
}
let key = build_table_global_prefix(catalog_name, schema_name);
let mut iter = backend.range(key.as_bytes());
let mut res = HashSet::new();
while let Some(r) = iter.next().await {
let Kv(k, _) = r?;
let key = TableGlobalKey::parse(String::from_utf8_lossy(&k))
.context(InvalidCatalogValueSnafu)?;
res.insert(key.table_name);
tables.push(key.table_name);
}
Ok(res.into_iter().collect())
Ok(tables)
})
})
.join()
@@ -391,6 +397,13 @@ impl SchemaProvider for FrontendSchemaProvider {
}
async fn table(&self, name: &str) -> catalog::error::Result<Option<TableRef>> {
if self.catalog_name == DEFAULT_CATALOG_NAME
&& self.schema_name == DEFAULT_SCHEMA_NAME
&& name == "numbers"
{
return Ok(Some(Arc::new(NumbersTable::default())));
}
let table_global_key = TableGlobalKey {
catalog_name: self.catalog_name.clone(),
schema_name: self.schema_name.clone(),

Some files were not shown because too many files have changed in this diff.