Compare commits

...

96 Commits

Author SHA1 Message Date
Ruihang Xia
6d64e1c296 feat(mito): checkpoint for mito2 (#2142)
* basic impl

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* adjust dir structure

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix styles

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* sort result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* downgrade log level

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* apply CR sugg.

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add region id to log

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-08-13 09:26:01 +00:00
Yingwen
e6090a8d5b feat(mito): Write wal and memtable (#2135)
* feat: hold wal entry in RegionWriteCtx

* feat: entry id and committed sequence

* feat: write to wal

* feat: write memtable

* feat: fill missing columns

* feat: validate write request

* feat: more validation to write request

* chore: fix typos

* feat: remove init and validate rows in new()

* style: fix clippy
2023-08-12 07:44:44 +00:00
谢政
b62e643e92 build: update protobuf-build to support apple silicon (#2143)
* build: update protobuf-build to support apple silicon

* build: Update src/log-store/Cargo.toml

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

* build: update the Cargo.lock too

---------

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-08-12 03:31:51 +00:00
dennis zhuang
6f40128058 feat!: enable telemetry by default (#2137)
* feat: remove greptimedb-telemetry feature

* feat: adds enable_telemetry option to metasrv and datanode

* refactor: move data_home from file config to storage config

* feat: store the installation uuid into datanode and metasrv working home

* fix: cargo toml fmt

* test: ignore region failover test when using local file storage

* test: ignore telemetry reporter in test mode

* feat: print warning log when enabling telemetry

* chore: the telemetry doc link

* chore: remove enable_telemetry from datanode example config file

* refactor: rename GREPTIMEDB_TELEMETRY_CLIENT_REQUEST_TIMEOUT

* chore: rename print_warn_log to print_anonymous_usage_data_disclaimer
2023-08-11 14:50:40 +00:00
LFC
0b05c22be1 fix: make "explain" executable in repl (#2157) 2023-08-11 20:21:40 +08:00
Ruihang Xia
4fd1057764 fix: several clippy error/warnings after upgrading toolchain (#2156)
* fix pyscripts mod

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy::needless-pass-by-ref-mut

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add pyo3 feature gate in Makefile

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-08-11 20:20:21 +08:00
Zou Wei
6877d082f6 feat: compatible with postgres interval type (#2146)
* feat: impl ToSql/FromSql/ToSqlText for PgInterval.

* chore: remove useless code.

* feat: compatible with postgres interval type.

* chore: cr comment.
2023-08-11 20:19:57 +08:00
LFC
2dcc67769e fix: runs sqlness test on windows-latest-8-cores (#2158) 2023-08-11 17:34:58 +08:00
Ruihang Xia
b9bac2b195 fix: let information_schema know itself (#2149)
* rename show create table

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* register information_schema on registering catalog

* fix tests in standalone

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix frontend catalog manager

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add sqlness test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy & typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* tweak sqlness test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* rename constructor

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* rename method

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo (again)

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove redundant clones

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-08-11 15:37:27 +08:00
Zou Wei
584acca09d feat: impl duration type (#2117)
* feat: impl duration type in common time.

* feat: convert from/to std::time::Duration.

* refactor: convert function
2023-08-11 07:04:42 +00:00
LFC
ad2021a8d8 feat: print build output if it's failed in sqlness (#2152)
* feat: print build output if it's failed in sqlness

* feat: print build output if it's failed in sqlness
2023-08-11 03:34:15 +00:00
zyy17
c970c206d1 ci: add retry for uploading artifacts to s3 (#2147) 2023-08-10 12:59:04 +00:00
LFC
5c19913a91 build: on windows (#2054)
* build on windows

* rebase develop

* fix: resolve PR comments
2023-08-10 08:08:37 +00:00
zyy17
587a24e7fb ci: add working dir and some minor changes of create-version.sh (#2133)
* ci: add context argument in build-greptime-binary action

* refactor: add 'working-dir' in upload-artifacts action and rename 'context' to 'working-dir'

* refactor: use timestamp as part of image tag when triggered manually
2023-08-10 04:46:43 +00:00
Ning Sun
0270708d6d fix: correct grpc metric labels (#2136) 2023-08-10 03:59:41 +00:00
WU Jingdi
b7319fe2b1 feat: Support RangeSelect LogicalPlan rewrite (#2058)
* feat: Support RangeSelect LogicalPlan rewrite

* chore: fix code advice

* fix: change format of range_fn

* chore: optimize project plan rewrite

* chore: fix code advice
2023-08-10 02:53:20 +00:00
LFC
ea3708b33d fix: deserialize TableInfoValue with missing field (#2134) 2023-08-10 02:43:24 +00:00
Zhenchi
7abe71f399 fix(table): return correct table types (#2131)
* fix(table): return correct table types

Signed-off-by: zhongzc <zhongzc@zhongzcs-MacBook-Pro.local>

* fix: NumbersTable to be Temporary table

Signed-off-by: zhongzc <zhongzc@zhongzcs-MacBook-Pro.local>

* fix(test): fix affected cases

Signed-off-by: zhongzc <zhongzc@zhongzcs-MacBook-Pro.local>

* fix(test): fix affected cases

Signed-off-by: zhongzc <zhongzc@zhongzcs-MacBook-Pro.local>

* fix: fmt

Signed-off-by: zhongzc <zhongzc@zhongzcs-MacBook-Pro.local>

* fix(tests): fix instance_test expected result

* retrigger action

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: zhongzc <zhongzc@zhongzcs-MacBook-Pro.local>
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
Co-authored-by: zhongzc <zhongzc@zhongzcs-MacBook-Pro.local>
2023-08-09 11:07:00 +00:00
Ruihang Xia
b156225b80 fix: correct the schema used by TypeConversionRule (#2132)
* fix: correct the schema used by TypeConversionRule

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* specify time zone in UT

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-08-09 08:18:17 +00:00
zyy17
2ac51c6348 fix: set the correct working dir before building the artifacts (#2129) 2023-08-09 14:34:29 +08:00
Ning Sun
7f5f8749da test: add conditional compilation flag for datanode mock module (#2130) 2023-08-09 06:10:54 +00:00
Yingwen
d4e863882f feat: Add write method to memtable trait (#2123)
* feat: validate semantic type

* feat: define KeyValues

* test: test semantic type check

* feat: impl KeyValues

* test: test KeyValues

* feat: Add write to Memtable

* style: fix clippy

* docs: more comment
2023-08-09 04:07:50 +00:00
Ning Sun
d18eb18b32 feat: use server-inferred types on statement describe (#2032)
* feat: use server-inferred types on statement describe

* feat: add support for server-inferred type

* feat: allow parameter type inference

* chore: update comments

* fix: lint issue

* style: comfort rustfmt

* Update src/servers/src/postgres/types.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-08-09 02:57:56 +00:00
liyang
aa6452c86c chore: rename dockerhub registry password (#2127) 2023-08-09 02:28:56 +00:00
zyy17
d44cd9c6f5 fix: add 'image-name' argument to correct the invalid image namespace (mix with image-name) (#2126) 2023-08-09 10:04:11 +08:00
gongzhengyang
ce0f909cac perf: change current schema and catalog to borrow, clone only when necessary (#2116)
perf: change current schema and catalog to borrow, clone only when necessary

Co-authored-by: gongzhengyang <gongzhengyang@bolean.com.cn>
2023-08-08 12:48:24 +00:00
Ruihang Xia
4c693799d8 fix: bugs related to merge scan (#2118)
* fix: prevent optimizing merge scan, mark distinct as unsupported

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix some other problems

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix unit tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove deadcode

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add some comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/query/src/optimizer/type_conversion.rs

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2023-08-08 11:42:57 +00:00
Vanish
57836e762b feat: truncate table in standalone mode (#2090)
* feat: impl table procedure in standalone mode

* chore: remove useless changes

* test: add some tests

* Update src/table-procedure/src/truncate.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* CR

* Update src/datanode/src/sql/truncate_table.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: fmt

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-08-08 11:23:36 +00:00
zyy17
d927ab1ce5 ci: add 'upload-to-s3' option and disable it in dev build (#2124) 2023-08-08 11:22:24 +00:00
Ning Sun
c39de9072f refactor: use workspace dependencies for internal modules (#2119)
* refactor: use workspace dependencies for internal modules

* fix: resolve issue with mock module in datanode

* refactor: update test modules
2023-08-08 11:02:34 +00:00
zyy17
815a6d2d61 fix: var compare error(yet another stupid mistake) (#2122) 2023-08-08 17:39:53 +08:00
zyy17
f1f8a1d3a9 ci: fix incorrect variable name (#2121) 2023-08-08 17:20:11 +08:00
zyy17
e7abd00fc0 ci: fix error import path (#2120) 2023-08-08 17:12:54 +08:00
zyy17
5e2fdec1b6 ci: add dev-build (#2114) 2023-08-08 07:58:59 +00:00
Lei, HUANG
2d9ea595cb chore!: change logstore namespace prefix (#1998)
* chore: change logstore namespace prefix

* chore: change delimiter
2023-08-08 07:36:46 +00:00
LFC
46fa3eb629 chore: upgrade rust toolchain to latest nightly (#2049)
* chore: upgrade rust toolchain to latest nightly

* rebase develop

* update rust toolchain in ci
2023-08-08 07:17:51 +00:00
Weny Xu
7d0d8dc6e3 feat: return metasrv leader addr (#2110) 2023-08-07 10:01:42 +00:00
Zhenchi
f8d152231d feat(information_schema): implement table_factory method (#2108)
* feat(information_schema): implement table_factory method

* refactor(catalog): simplify table_factory method

* Update src/table/src/data_source.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-08-07 08:07:25 +00:00
Weny Xu
c8cb1ef5bc feat: add schema and catalog key migration tool (#2048)
* feat: add schema and catalog key migration tool

* chore: apply suggestions from CR
2023-08-07 06:22:05 +00:00
Zou Wei
d5cadeeec3 feat: conversion between interval and gRPC (#2064)
* feat: support grpc for interval type

* chore: add unit test cases.

* chore: cargo clippy

* chore: modify greptime-proto version

* chore: cr comment.

* chore: cargo fmt

* refactor: convert function.
2023-08-07 06:22:04 +00:00
Ruihang Xia
7210b35d86 docs: rfc of refactoring table trait (#2106)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-08-07 02:55:19 +00:00
Vanish
cf7e8c9142 feat: truncate region (#2097)
* feat: impl truncate region

* test: test truncate region

* chore: typo

* refactor: table truncate

* chore: remove useless changes

* chore: reset version

* fix: wait for flush task to complete

* fix: clippy

* chore: remove useless changes

* CR

Co-authored-by: Yingwen <realevenyag@gmail.com>

* Update src/storage/src/engine.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* Update src/storage/src/engine.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* Update src/storage/src/region.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* Update src/storage/src/region/tests/truncate.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* Update src/storage/src/region/tests/truncate.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* Update src/storage/src/region/writer.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* CR

* Update src/storage/src/engine.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* Update src/storage/src/manifest/region.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-08-04 12:26:25 +00:00
Yingwen
cb4dd89754 feat(mito): Implement mito2 Wal (#2103)
* feat: define wal struct

* feat: Implement Wal read/write

* feat: obsolete wal

* test: test wal

* refactor: use try_stream and remove async from scan
2023-08-04 11:04:25 +00:00
zyy17
9139962070 fix: fix version output empty error: '$GITHUB_ENV' -> '$GITHUB_OUTPUT' (#2104) 2023-08-04 17:48:11 +08:00
Ruihang Xia
9718aa17c9 feat: define region group and sequence (#2100)
* define region group

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* define region sequence

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* check partition number

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* test region seq and group

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-08-04 09:08:07 +00:00
Ruihang Xia
18896739d8 fix: disable region failover in sqlness test (#2102)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-08-04 08:38:40 +00:00
zyy17
8bcad936d3 fix: wrong action url prompt (#2099)
fix: wrong action url
2023-08-04 07:39:02 +00:00
shuiyisong
7efff2d704 fix: introduce taplo.toml and sort Cargo.toml (#2096)
* fix: add taplo.toml

* fix: introduce taplo.toml & sort cargo.toml

* chore: remove option in ci too
2023-08-04 06:44:45 +00:00
Ning Sun
93cd4ab89d ci: require cargo.lock up to date (#2094) 2023-08-04 02:59:01 +00:00
Yingwen
e5663a075f feat(mito): preparation to implementing write (#2085)
* refactor: move request mod

* feat: add mutation

* feat: add handle_write mod

* feat: one mutation at a time

* feat: handle write requests

* feat: validate schema

* refactor: move schema check to write request

* feat: add convert value

* feat: fill default values

* chore: remove comments

* feat: remove code

* feat: remove code

* feat: buf requests

* style: fix clippy

* refactor: rename check functions

* chore: fix compile error

* chore: Revert "feat: remove code"

This reverts commit 6516597540.

* chore: Revert "feat: remove code"

This reverts commit 5f2b790a01.

* chore: upgrade greptime-proto

* chore: Update comment

Co-authored-by: dennis zhuang <killme2008@gmail.com>

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-08-04 02:53:02 +00:00
zyy17
ac81d3c74f fix: add the missing 'NIGHTLY_RELEASE_PREFIX' and fail fast in 'allocate-runners' job (#2093) 2023-08-04 02:51:47 +00:00
JeremyHi
7987e08ca2 chore: typo (#2092) 2023-08-04 01:38:17 +00:00
Eugene Tolbakov
1492700acc fix(timestamp): add trim for the input date string (#2078)
* fix(timestamp): add trim for the input date string

* fix(timestamp): add analyzer rule to trim strings before conversion

* fix: adjust according to CR
2023-08-03 23:33:47 +00:00
shuiyisong
6f1094db0a fix: arc() usage in non-test code (#2091)
* chore: try fix arc issue

* chore: move `parse_catalog_and_schema_from_client_database_name` to catalog crate

* fix: arc issue

* fix: arc issue

* fix: arc issue

* fix: arc issue

* fix: minor change
2023-08-03 10:16:02 +00:00
zyy17
21655cb56f ci: add nightly build workflow (#2089) 2023-08-03 09:11:39 +00:00
Ruihang Xia
5f0403c245 feat: improve /label and /labels APIs in prometheus server (#2087)
* support __name__ for /label

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* make match[] in labels optional

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-08-03 07:51:08 +00:00
fys
d7002caca7 chore: add meter-core dependency (#2088) 2023-08-03 07:24:34 +00:00
fys
dda922507f feat: impl pubsub in metasrv (#2045)
* feat: impl pubsub

* add test_subscriber_disconnect unit test

* chore: cr

* cr

* cr
2023-08-03 03:56:43 +00:00
Yingwen
fdd4929c8f refactor(mito): mv mito2 request (#2086)
* refactor: mv request mod to crate level

* refactor: mv SkippedFields
2023-08-03 03:38:46 +00:00
zyy17
90b2200cc8 chore!: modify install.sh to adapt the new release package format (#2077)
chore: modify install.sh to adapt the new release package format
2023-08-03 02:09:31 +00:00
Vanish
e3a079a142 fix: session features (#2084) 2023-08-02 13:39:17 +00:00
discord9
c55841988e feat: necessary Hash derive for types (#2075)
* feat: necessary derive for types

* impl (Partial)Ord for ConcreteDataType
2023-08-02 13:08:43 +00:00
zyy17
279df2e558 fix: incorrect argument name: 'disable_run_tests' -> 'disable-run-tests' (#2079)
fix: 'disable_run_tests' -> 'disable-run-tests'
2023-08-02 11:16:56 +00:00
Ning Sun
7a27ef8d11 fix: remove openssl from reqwest and use rustls instead (#2081)
* fix: remove openssl from reqwest and use rustls instead

* fix: correct server url

* style: fix toml format
2023-08-02 10:23:21 +00:00
zyy17
be8f243c64 chore: update Cargo.lock (#2068) 2023-08-02 15:23:16 +08:00
zyy17
e1edb87017 fix: add the missing 'TARGET' in Makefile (#2066) 2023-08-02 06:42:43 +00:00
Ruihang Xia
bbbeaa709b fix(deps): update greptime-proto rev to the one after merge (#2063)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-08-02 06:33:10 +00:00
Weny Xu
4626c2efe5 feat: add Catalog and Schema Manager (#2037)
* feat: add Range Stream

* feat: add catalog and schema manager

* feat: enhance KeyValueDecoderFn

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2023-08-02 03:56:29 +00:00
Ruihang Xia
346c52eb72 docs: update SDK list (#2062)
* docs: update SDK list

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* correct py url

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-08-02 02:31:43 +00:00
zyy17
47a796c0ba fix: incorrect github token secret name (#2061)
Signed-off-by: zyy17 <zyylsxm@gmail.com>
2023-08-02 02:20:49 +00:00
shuiyisong
5eb2c609a3 fix: auth in grpc (#2056)
* fix: auth in grpc

* fix: change to return err

* fix: add grpc test

* fix: add http test

* fix: add mysql and pg test
2023-08-01 15:18:31 +00:00
zyy17
7d76131469 chore: modify the directory of release bucket and remove unused files (#2059) 2023-08-01 13:07:13 +00:00
Ruihang Xia
a3fa455f31 docs: rfc of metric engine (#1925)
* docs: rfc of metric engine

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add drawback section

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add sections about physical impl and meta routing

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add chart about region id group

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typos

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-08-01 11:26:48 +00:00
JeremyHi
fd7eb87a52 refactor: common semantic-type (#2057) 2023-08-01 11:18:05 +00:00
Sunray Ley
090b7e61ca feat: make the gRPC channel between Frontend and Datanode configurable (#2044)
* feat: expose frontend datanode_client_options

* chore: add configuration options to the configuration file

* refactor(frontend): extract DatanodeOptions to service_config

* refactor(frontend): extract DatanodeOptions to service_config

* style: remove unnecessary suffix in variable name

Co-authored-by: Yingwen <realevenyag@gmail.com>

* feat: use humantime_serde for readable duration

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-08-01 10:49:41 +00:00
Yingwen
c529c8a41b feat(mito): Implement open and close for mito2 regions (#2052)
* feat: add close request

* feat: handle close and open request

* feat: Implement open

* test: add TestEnv::new

* feat: close region/engine and test

* style: fix clippy

* style: import log macros

* docs: update docs

* docs: add mermaid for manifest manager
2023-08-01 10:49:07 +00:00
gongzhengyang
0eac56a442 chore: remove unused dependencies (#2055)
Co-authored-by: gongzhengyang <gongzhengyang@bolean.com.cn>
2023-08-01 07:43:03 +00:00
Ruihang Xia
44f3ed2f74 chore(deps): bump datafusion to the latest commit (#1967)
* bump deps

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix compile except pyo3 backend

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix promql-parser metric name matcher

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix pyarrow convert

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix pyo3 compiling

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove deadcode

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update stream adapter display format

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix physical optimizer rule

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-08-01 02:10:49 +00:00
Ruihang Xia
5bd80a74ab feat: prepare for implementing considering partition key in the distributed planner (#2000)
* basic impl

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix frontend logic

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add sqlness test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* check substrait compatibility before pushdown

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* going to revert some rules

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix test and clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix compile error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove println

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-07-31 12:36:23 +00:00
Ruihang Xia
bddaf265a9 chore(ci): run clippy, coverage and sqlness in parallel (#2050)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-07-31 10:37:30 +00:00
Yingwen
4d5ecb54c5 feat(mito): Implement open for RegionManifestManager (#2036)
* feat: file purger trait

* feat: Implement open for RegionManifestManager

* feat: remove RegionVersion

* feat: Use RwLock

* chore: remove AtomicManifestVersion

* feat: Remove unused error

* feat: store meta action

* chore: update comment
2023-07-31 10:04:22 +00:00
shuiyisong
922d826347 chore: make tables() return kv instead of key only (#2047)
* chore: make tables return kv

* chore: remove comment code
2023-07-31 07:30:47 +00:00
localhost
7681864eb4 chore: add version reporter (#2007)
* chore: add version reporter

* chore: add uuid for version report

* chore: add file license

* chore: format code

* chore: fix by pr comment

* chore: change version report api url

* chore: change greptimedb opentelemetry crate name

* chore: minor code beautification

* chore: add keys only option when range etcd

* chore: fix by pr comment

* chore: fix by pr comment

* chore: change uuid file location

* chore: only run telemetry in meta leader

* chore: add more test and some minor fix

* chore: make clippy happy

* chore: fix by pr comment

* chore: fix by pr comment

* chore: add debug log for greptimedb telemetry
2023-07-31 06:58:00 +00:00
zyy17
45832475d0 feat: rewrite the release pipeline to make it clean (#2038)
* refactor: modify cache path of Dockerfile

* feat: rewrite the release pipeline to make it clean
2023-07-31 04:57:04 +00:00
Zou Wei
7727508485 feat: impl interval type (#1952)
* feat: impl interval type in common time

* feat: impl datatype, vectors, value for interval

* comments update

* add license header

* cargo clippy

* refactor interval type

* add unit test and case to dummy.sql

* cargo clippy

* chore: add doc comments

* chore: cargo fmt

* feat: add formats, refactor comparison

* add docs comments

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: cr comment

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-07-31 03:54:39 +00:00
zyy17
216f220007 fix: restore 'aarch64/compile-python.sh' to fix the failed release temporarily (#2046)
fix: add 'aarch64/compile-python.sh' back to fix the failed release temporarily

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2023-07-31 03:38:27 +00:00
Niwaka
695398652c feat: accept influxdb request without timestamp even if table doesn't exist (#2041)
* feat: accept influxdb request without timestamp even if table doesn't exist

* refactor: InsertRequests::try_from

* feat: check row number
2023-07-31 02:55:09 +00:00
parkma99
fc6ebf58b4 refactor: create_current_timestamp_vector by using VectorOp::cast (#2042)
* refactor using VectorOp cast

* add test case
2023-07-31 02:51:06 +00:00
Zou Wei
f22b787fd9 chore: return error in arrow array convert function (#2043)
fix: return error instead of unreachable!()
2023-07-31 02:47:40 +00:00
Lei, HUANG
81ea61ba43 fix: window inferer (#2033)
* fix: window inferer

* chore: rename
2023-07-26 02:18:19 +00:00
zyy17
662879ff4b refactor: don't set the build jobs when nproc is not found (#2034)
refactor: don't set the build jobs when nproc is not found
2023-07-25 13:40:44 +00:00
LFC
48996b0646 fix: etcd range pagination in table metadata migration tool (#2035) 2023-07-25 10:02:26 +00:00
fys
0b4ac987cd refactor: arrange lease kvs randomly in lease_based selector (#2028)
* refactor: arrange lease kvs randomly in lease_based selector

* fix: cr
2023-07-25 07:32:10 +00:00
shuiyisong
9c1f0234de refactor: query context (#2022)
* chore: refactor query_context

* chore: remove use statement

* chore: make query_context build return arc

* fix: sqlness test

* fix: cr issue

* fix: use unwrap or else
2023-07-25 06:11:34 +00:00
Ruihang Xia
f55bff51ac feat: set and retrieve trace id in log macro (#2016)
* trace id passed by task local store

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* modify log macro

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove tokio::spawn

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use real trace id

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-07-25 03:50:27 +00:00
Weny Xu
0fc0f74cd7 fix: fix parking_lot unresolved (#2025) 2023-07-25 03:20:12 +00:00
498 changed files with 18603 additions and 4996 deletions

View File

@@ -12,5 +12,9 @@ rustflags = [
"-Wclippy::print_stdout",
"-Wclippy::print_stderr",
"-Wclippy::implicit_clone",
"-Aclippy::items_after_test_module",
# It seems clippy has made a false positive decision here when upgrading the rust toolchain to
# nightly-2023-08-07; we do need it to be borrowed mutably.
# Allow it for now; try to disallow it when the toolchain is upgraded in the future.
"-Aclippy::needless_pass_by_ref_mut",
]

View File

@@ -1,2 +1,3 @@
[profile.default]
slow-timeout = { period = "60s", terminate-after = 3, grace-period = "30s" }
retries = { backoff = "exponential", count = 3, delay = "10s", jitter = true }
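
Assuming nextest's documented semantics, this config marks a test as slow after a 60s period, terminates it after three such periods (with a 30s grace period before a hard kill), and retries a failed test up to 3 times with an exponentially growing delay that starts around 10s, randomized by jitter.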

View File

@@ -0,0 +1,70 @@
name: Build and push dev-builder image
description: Build and push dev-builder image to DockerHub and ACR
inputs:
dockerhub-image-registry:
description: The dockerhub image registry to store the images
required: false
default: docker.io
dockerhub-image-registry-username:
description: The dockerhub username to login to the image registry
required: true
dockerhub-image-registry-token:
description: The dockerhub token to login to the image registry
required: true
dockerhub-image-namespace:
description: The dockerhub namespace of the image registry to store the images
required: false
default: greptime
acr-image-registry:
description: The ACR image registry to store the images
required: true
acr-image-registry-username:
description: The ACR username to login to the image registry
required: true
acr-image-registry-password:
description: The ACR password to login to the image registry
required: true
acr-image-namespace:
description: The ACR namespace of the image registry to store the images
required: false
default: greptime
version:
description: Version of the dev-builder
required: false
default: latest
runs:
using: composite
steps:
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
registry: ${{ inputs.dockerhub-image-registry }}
username: ${{ inputs.dockerhub-image-registry-username }}
password: ${{ inputs.dockerhub-image-registry-token }}
- name: Build and push dev builder image to dockerhub
shell: bash
run:
make dev-builder \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}
- name: Login to ACR
uses: docker/login-action@v2
continue-on-error: true
with:
registry: ${{ inputs.acr-image-registry }}
username: ${{ inputs.acr-image-registry-username }}
password: ${{ inputs.acr-image-registry-password }}
- name: Build and push dev builder image to ACR
shell: bash
continue-on-error: true
run: # buildx will cache the images that were already built, so it will not take a long time to build the images again.
make dev-builder \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.acr-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.acr-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}

View File

@@ -0,0 +1,62 @@
name: Build greptime binary
description: Build and upload the single linux artifact
inputs:
base-image:
description: Base image to build greptime
required: true
features:
description: Cargo features to build
required: true
cargo-profile:
description: Cargo profile to build
required: true
artifacts-dir:
description: Directory to store artifacts
required: true
version:
description: Version of the artifact
required: true
release-to-s3-bucket:
description: S3 bucket to store released artifacts
required: true
aws-access-key-id:
description: AWS access key id
required: true
aws-secret-access-key:
description: AWS secret access key
required: true
aws-region:
description: AWS region
required: true
upload-to-s3:
description: Upload to S3
required: false
default: 'true'
working-dir:
description: Working directory to build the artifacts
required: false
default: .
runs:
using: composite
steps:
- name: Build greptime binary
shell: bash
run: |
cd ${{ inputs.working-dir }} && \
make build-greptime-by-buildx \
CARGO_PROFILE=${{ inputs.cargo-profile }} \
FEATURES=${{ inputs.features }} \
BASE_IMAGE=${{ inputs.base-image }}
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: ./greptime
version: ${{ inputs.version }}
release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
upload-to-s3: ${{ inputs.upload-to-s3 }}
working-dir: ${{ inputs.working-dir }}

View File

@@ -0,0 +1,104 @@
name: Build greptime images
description: Build and push greptime images
inputs:
image-registry:
description: The image registry to store the images
required: true
image-registry-username:
description: The username to login to the image registry
required: true
image-registry-password:
description: The password to login to the image registry
required: true
amd64-artifact-name:
description: The name of the amd64 artifact for building images
required: true
arm64-artifact-name:
description: The name of the arm64 artifact for building images
required: false
default: ""
image-namespace:
description: The namespace of the image registry to store the images
required: true
image-name:
description: The name of the image to build
required: true
image-tag:
description: The tag of the image to build
required: true
docker-file:
description: The path to the Dockerfile to build
required: true
platforms:
description: The supported platforms to build the image
required: true
push-latest-tag:
description: Whether to push the latest tag
required: false
default: 'true'
runs:
using: composite
steps:
- name: Login to image registry
uses: docker/login-action@v2
with:
registry: ${{ inputs.image-registry }}
username: ${{ inputs.image-registry-username }}
password: ${{ inputs.image-registry-password }}
- name: Set up qemu for multi-platform builds
uses: docker/setup-qemu-action@v2
- name: Set up buildx
uses: docker/setup-buildx-action@v2
- name: Download amd64 artifacts
uses: actions/download-artifact@v3
with:
name: ${{ inputs.amd64-artifact-name }}
- name: Unzip the amd64 artifacts
shell: bash
run: |
tar xvf ${{ inputs.amd64-artifact-name }}.tar.gz && \
rm ${{ inputs.amd64-artifact-name }}.tar.gz && \
rm -rf amd64 && \
mv ${{ inputs.amd64-artifact-name }} amd64
- name: Download arm64 artifacts
uses: actions/download-artifact@v3
if: ${{ inputs.arm64-artifact-name }}
with:
name: ${{ inputs.arm64-artifact-name }}
- name: Unzip the arm64 artifacts
shell: bash
if: ${{ inputs.arm64-artifact-name }}
run: |
tar xvf ${{ inputs.arm64-artifact-name }}.tar.gz && \
rm ${{ inputs.arm64-artifact-name }}.tar.gz && \
rm -rf arm64 && \
mv ${{ inputs.arm64-artifact-name }} arm64
- name: Build and push images(without latest) for amd64 and arm64
if: ${{ inputs.push-latest-tag == 'false' }}
uses: docker/build-push-action@v3
with:
context: .
file: ${{ inputs.docker-file }}
push: true
platforms: ${{ inputs.platforms }}
tags: |
${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.image-tag }}
- name: Build and push images for amd64 and arm64
if: ${{ inputs.push-latest-tag == 'true' }}
uses: docker/build-push-action@v3
with:
context: .
file: ${{ inputs.docker-file }}
push: true
platforms: ${{ inputs.platforms }}
tags: |
${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:latest
${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.image-tag }}

.github/actions/build-images/action.yml
View File

@@ -0,0 +1,62 @@
name: Group for building greptimedb images
description: Group for building greptimedb images
inputs:
image-registry:
description: The image registry to store the images
required: true
image-namespace:
description: The namespace of the image registry to store the images
required: true
image-name:
description: The name of the image to build
required: false
default: greptimedb
image-registry-username:
description: The username to login to the image registry
required: true
image-registry-password:
description: The password to login to the image registry
required: true
version:
description: Version of the artifact
required: true
push-latest-tag:
description: Whether to push the latest tag
required: false
default: 'true'
dev-mode:
description: Enable dev mode, only build standard greptime
required: false
default: 'false'
runs:
using: composite
steps:
- name: Build and push standard images to dockerhub
uses: ./.github/actions/build-greptime-images
with: # The image will be used as '${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }}'
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
image-registry-username: ${{ inputs.image-registry-username }}
image-registry-password: ${{ inputs.image-registry-password }}
image-name: ${{ inputs.image-name }}
image-tag: ${{ inputs.version }}
docker-file: docker/ci/Dockerfile
amd64-artifact-name: greptime-linux-amd64-pyo3-${{ inputs.version }}
arm64-artifact-name: greptime-linux-arm64-pyo3-${{ inputs.version }}
platforms: linux/amd64,linux/arm64
push-latest-tag: ${{ inputs.push-latest-tag }}
- name: Build and push centos images to dockerhub
if: ${{ inputs.dev-mode == 'false' }}
uses: ./.github/actions/build-greptime-images
with:
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
image-registry-username: ${{ inputs.image-registry-username }}
image-registry-password: ${{ inputs.image-registry-password }}
image-name: ${{ inputs.image-name }}-centos
image-tag: ${{ inputs.version }}
docker-file: docker/ci/Dockerfile-centos
amd64-artifact-name: greptime-linux-amd64-centos-${{ inputs.version }}
platforms: linux/amd64
push-latest-tag: ${{ inputs.push-latest-tag }}

View File

@@ -0,0 +1,104 @@
name: Build linux artifacts
description: Build linux artifacts
inputs:
arch:
description: Architecture to build
required: true
cargo-profile:
description: Cargo profile to build
required: true
version:
description: Version of the artifact
required: true
disable-run-tests:
description: Disable running integration tests
required: true
release-to-s3-bucket:
description: S3 bucket to store released artifacts
required: true
aws-access-key-id:
description: AWS access key id
required: true
aws-secret-access-key:
description: AWS secret access key
required: true
aws-region:
description: AWS region
required: true
dev-mode:
description: Enable dev mode, only build standard greptime
required: false
default: 'false'
upload-to-s3:
description: Upload to S3
required: false
default: 'true'
working-dir:
description: Working directory to build the artifacts
required: false
default: .
runs:
using: composite
steps:
- name: Run integration test
if: ${{ inputs.disable-run-tests == 'false' }}
shell: bash
# NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance.
run: |
cd ${{ inputs.working-dir }} && \
make run-it-in-container BUILD_JOBS=4
- name: Upload sqlness logs
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: /tmp/greptime-*.log
retention-days: 3
- name: Build standard greptime
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
features: pyo3_backend,servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
upload-to-s3: ${{ inputs.upload-to-s3 }}
working-dir: ${{ inputs.working-dir }}
- name: Build greptime without pyo3
if: ${{ inputs.dev-mode == 'false' }}
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
features: servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
upload-to-s3: ${{ inputs.upload-to-s3 }}
working-dir: ${{ inputs.working-dir }}
- name: Build greptime on centos base image
uses: ./.github/actions/build-greptime-binary
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build centos7 base image for amd64.
with:
base-image: centos
features: servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}
release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
upload-to-s3: ${{ inputs.upload-to-s3 }}
working-dir: ${{ inputs.working-dir }}

View File

@@ -0,0 +1,105 @@
name: Build macos artifacts
description: Build macos artifacts
inputs:
arch:
description: Architecture to build
required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile:
description: Cargo profile to build
required: true
features:
description: Cargo features to build
required: true
version:
description: Version of the artifact
required: true
disable-run-tests:
description: Disable running integration tests
required: true
release-to-s3-bucket:
description: S3 bucket to store released artifacts
required: true
artifacts-dir:
description: Directory to store artifacts
required: true
aws-access-key-id:
description: AWS access key id
required: true
aws-secret-access-key:
description: AWS secret access key
required: true
aws-region:
description: AWS region
required: true
runs:
using: composite
steps:
- name: Cache cargo assets
id: cache
uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ inputs.arch }}-build-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Install protoc
shell: bash
run: |
brew install protobuf
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}
- name: Start etcd # For integration tests.
if: ${{ inputs.disable-run-tests == 'false' }}
shell: bash
run: |
brew install etcd && \
brew services start etcd
- name: Install latest nextest release # For integration tests.
if: ${{ inputs.disable-run-tests == 'false' }}
uses: taiki-e/install-action@nextest
- name: Run integration tests
if: ${{ inputs.disable-run-tests == 'false' }}
shell: bash
run: |
make test sqlness-test
- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: /tmp/greptime-*.log
retention-days: 3
- name: Build greptime binary
shell: bash
run: |
make build \
CARGO_PROFILE=${{ inputs.cargo-profile }} \
FEATURES=${{ inputs.features }} \
TARGET=${{ inputs.arch }}
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }}
release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}

View File

@@ -0,0 +1,50 @@
name: Release artifacts
description: Release artifacts
inputs:
version:
description: Version to release
required: true
runs:
using: composite
steps:
# Download artifacts from previous jobs, the artifacts will be downloaded to:
# ${WORKING_DIR}
# |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
# |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
# ...
- name: Download artifacts
uses: actions/download-artifact@v3
- name: Create git tag for release
if: ${{ github.event_name != 'push' }} # Meaning this is a scheduled or manual workflow.
shell: bash
run: |
git tag ${{ inputs.version }}
# Only publish release when the release tag is like v1.0.0, v1.0.1, v1.0.2, etc.
- name: Set release arguments
shell: bash
run: |
if [[ "${{ inputs.version }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "prerelease=false" >> $GITHUB_ENV
echo "makeLatest=true" >> $GITHUB_ENV
echo "generateReleaseNotes=false" >> $GITHUB_ENV
else
echo "prerelease=true" >> $GITHUB_ENV
echo "makeLatest=false" >> $GITHUB_ENV
echo "generateReleaseNotes=true" >> $GITHUB_ENV
fi
- name: Publish release
uses: ncipollo/release-action@v1
with:
name: "Release ${{ inputs.version }}"
prerelease: ${{ env.prerelease }}
makeLatest: ${{ env.makeLatest }}
tag: ${{ inputs.version }}
generateReleaseNotes: ${{ env.generateReleaseNotes }}
allowUpdates: true
artifacts: |
**/greptime-*/*
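
The release arguments above hinge on the '^v[0-9]+\.[0-9]+\.[0-9]+$' tag check. As a rough sketch (the tag names below are illustrative, not taken from an actual run), the same classification can be reproduced in a shell loop:

# Mirrors the regex in the "Set release arguments" step; tag names are illustrative.
for v in v1.0.0 v0.4.0-nightly-20230808 dev-20230808-f0e7216c; do
  if [[ "$v" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    echo "$v -> prerelease=false, makeLatest=true, generateReleaseNotes=false"
  else
    echo "$v -> prerelease=true, makeLatest=false, generateReleaseNotes=true"
  fi
done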

.github/actions/start-runner/action.yml
View File

@@ -0,0 +1,67 @@
name: Start EC2 runner
description: Start EC2 runner
inputs:
runner:
description: The linux runner name
required: true
aws-access-key-id:
description: AWS access key id
required: true
aws-secret-access-key:
description: AWS secret access key
required: true
aws-region:
description: AWS region
required: true
github-token:
description: The GitHub token to clone private repository
required: false
default: ""
image-id:
description: The EC2 image id
required: true
security-group-id:
description: The EC2 security group id
required: true
subnet-id:
description: The EC2 subnet id
required: true
outputs:
label:
description: "label"
value: ${{ steps.start-linux-arm64-ec2-runner.outputs.label || inputs.runner }}
ec2-instance-id:
description: "ec2-instance-id"
value: ${{ steps.start-linux-arm64-ec2-runner.outputs.ec2-instance-id }}
runs:
using: composite
steps:
- name: Configure AWS credentials
if: startsWith(inputs.runner, 'ec2')
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
# The EC2 runner will use the following format:
# <vm-type>-<instance-type>-<arch>
# like 'ec2-c6a.4xlarge-amd64'.
- name: Get EC2 instance type
if: startsWith(inputs.runner, 'ec2')
id: get-ec2-instance-type
shell: bash
run: |
echo "instance-type=$(echo ${{ inputs.runner }} | cut -d'-' -f2)" >> $GITHUB_OUTPUT
- name: Start EC2 runner
if: startsWith(inputs.runner, 'ec2')
uses: machulav/ec2-github-runner@v2
id: start-linux-arm64-ec2-runner
with:
mode: start
ec2-image-id: ${{ inputs.image-id }}
ec2-instance-type: ${{ steps.get-ec2-instance-type.outputs.instance-type }}
subnet-id: ${{ inputs.subnet-id }}
security-group-id: ${{ inputs.security-group-id }}
github-token: ${{ inputs.github-token }}

.github/actions/stop-runner/action.yml
View File

@@ -0,0 +1,41 @@
name: Stop EC2 runner
description: Stop EC2 runner
inputs:
label:
description: The linux runner name
required: true
ec2-instance-id:
description: The EC2 instance id
required: true
aws-access-key-id:
description: AWS access key id
required: true
aws-secret-access-key:
description: AWS secret access key
required: true
aws-region:
description: AWS region
required: true
github-token:
description: The GitHub token to clone private repository
required: false
default: ""
runs:
using: composite
steps:
- name: Configure AWS credentials
if: ${{ inputs.label && inputs.ec2-instance-id }}
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
- name: Stop EC2 runner
if: ${{ inputs.label && inputs.ec2-instance-id }}
uses: machulav/ec2-github-runner@v2
with:
mode: stop
label: ${{ inputs.label }}
ec2-instance-id: ${{ inputs.ec2-instance-id }}
github-token: ${{ inputs.github-token }}

View File

@@ -0,0 +1,98 @@
name: Upload artifacts
description: Upload artifacts
inputs:
artifacts-dir:
description: Directory to store artifacts
required: true
target-file:
description: The path of the target artifact
required: true
version:
description: Version of the artifact
required: true
release-to-s3-bucket:
description: S3 bucket to store released artifacts
required: true
aws-access-key-id:
description: AWS access key id
required: true
aws-secret-access-key:
description: AWS secret access key
required: true
aws-region:
description: AWS region
required: true
upload-to-s3:
description: Upload to S3
required: false
default: 'true'
working-dir:
description: Working directory to upload the artifacts
required: false
default: .
runs:
using: composite
steps:
- name: Create artifacts directory
working-directory: ${{ inputs.working-dir }}
shell: bash
run: |
mkdir -p ${{ inputs.artifacts-dir }} && \
mv ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}
# The compressed artifacts will use the following layout:
# greptime-linux-amd64-pyo3-v0.3.0.sha256sum
# greptime-linux-amd64-pyo3-v0.3.0.tar.gz
# greptime-linux-amd64-pyo3-v0.3.0
# └── greptime
- name: Compress artifacts and calculate checksum
working-directory: ${{ inputs.working-dir }}
shell: bash
run: |
tar -zcvf ${{ inputs.artifacts-dir }}.tar.gz ${{ inputs.artifacts-dir }} && \
echo $(shasum -a 256 ${{ inputs.artifacts-dir }}.tar.gz | cut -f1 -d' ') > ${{ inputs.artifacts-dir }}.sha256sum
# Note: The artifacts will be double zip compressed (related issue: https://github.com/actions/upload-artifact/issues/39).
# However, when we use 'actions/download-artifact@v3' to download the artifacts, it will be automatically unzipped.
- name: Upload artifacts
uses: actions/upload-artifact@v3
with:
name: ${{ inputs.artifacts-dir }}
path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.tar.gz
- name: Upload checksum
uses: actions/upload-artifact@v3
with:
name: ${{ inputs.artifacts-dir }}.sha256sum
path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.sha256sum
- name: Configure AWS credentials
if: ${{ inputs.upload-to-s3 == 'true' }}
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
- name: Upload artifacts to S3
if: ${{ inputs.upload-to-s3 == 'true' }}
uses: nick-invision/retry@v2
with:
max_attempts: 20
timeout_minutes: 5
# The bucket layout will be:
# releases/greptimedb
# ├── v0.1.0
# │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
# │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
# └── v0.2.0
# ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
# └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
command: |
cd ${{ inputs.working-dir }} && \
aws s3 cp \
${{ inputs.artifacts-dir }}.tar.gz \
s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.tar.gz && \
aws s3 cp \
${{ inputs.artifacts-dir }}.sha256sum \
s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.sha256sum
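
Since the .sha256sum file produced above contains only the bare hash (the file name is stripped by the cut), a consumer of these artifacts could verify a download roughly as follows; the artifact name is hypothetical and the tools match the ones used in the checksum step above:

# Hypothetical consumer-side check; ARTIFACT is an illustrative name, not a real release.
ARTIFACT=greptime-linux-amd64-pyo3-v0.4.0
expected=$(cat "${ARTIFACT}.sha256sum")
actual=$(shasum -a 256 "${ARTIFACT}.tar.gz" | cut -f1 -d' ')
if [ "$expected" = "$actual" ]; then
  echo "checksum OK"
else
  echo "checksum mismatch" >&2
  exit 1
fi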

.github/scripts/create-version.sh
View File

@@ -0,0 +1,68 @@
#!/usr/bin/env bash
set -e
# - If it's a tag push release, the version is the tag name (${{ github.ref_name }});
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like 'v0.2.0-nightly-20230313';
# - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-$(git rev-parse --short HEAD)-YYYYMMDDSS', like 'v0.2.0-e5b243c-2023071245';
# - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
# create_version ${GITHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
function create_version() {
# Read from environment variables.
if [ -z "$GITHUB_EVENT_NAME" ]; then
echo "GITHUB_EVENT_NAME is empty"
exit 1
fi
if [ -z "$NEXT_RELEASE_VERSION" ]; then
echo "NEXT_RELEASE_VERSION is empty"
exit 1
fi
if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then
echo "NIGHTLY_RELEASE_PREFIX is empty"
exit 1
fi
# Reuse $NEXT_RELEASE_VERSION to identify whether it's a nightly build.
# It will be like 'nightly-20230808-7d0d8dc6'.
if [ "$NEXT_RELEASE_VERSION" = nightly ]; then
echo "$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")-$(git rev-parse --short HEAD)"
exit 0
fi
# Reuse $NEXT_RELEASE_VERSION to identify whether it's a dev build.
# It will be like 'dev-2023080819-f0e7216c'.
if [ "$NEXT_RELEASE_VERSION" = dev ]; then
if [ -z "$COMMIT_SHA" ]; then
echo "COMMIT_SHA is empty in dev build"
exit 1
fi
echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)"
exit 0
fi
# Note: Only output 'version=xxx' to stdout when everything is ok, so that it can be used in GitHub Actions Outputs.
if [ "$GITHUB_EVENT_NAME" = push ]; then
if [ -z "$GITHUB_REF_NAME" ]; then
echo "GITHUB_REF_NAME is empty in push event"
exit 1
fi
echo "$GITHUB_REF_NAME"
elif [ "$GITHUB_EVENT_NAME" = workflow_dispatch ]; then
echo "$NEXT_RELEASE_VERSION-$(git rev-parse --short HEAD)-$(date "+%Y%m%d-%s")"
elif [ "$GITHUB_EVENT_NAME" = schedule ]; then
echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")"
else
echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME"
exit 1
fi
}
# You can run it as in the following examples:
# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
create_version
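
For reference, a rough sketch of what the example invocations above would print, derived from the branches of create_version; the dates, epoch seconds and short hashes are illustrative only:

# Illustrative outputs (not from a real run):
#   push,              GITHUB_REF_NAME=v0.3.0        -> v0.3.0
#   workflow_dispatch, NEXT_RELEASE_VERSION=v0.4.0   -> v0.4.0-7d0d8dc6-20230808-1691484000
#   schedule,          NEXT_RELEASE_VERSION=v0.4.0   -> v0.4.0-nightly-20230808
#   schedule,          NEXT_RELEASE_VERSION=nightly  -> nightly-20230808-7d0d8dc6
#   workflow_dispatch, NEXT_RELEASE_VERSION=dev      -> dev-20230808-1691484000-f0e7216c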

View File

@@ -13,7 +13,7 @@ on:
name: Build API docs
env:
RUST_TOOLCHAIN: nightly-2023-05-03
RUST_TOOLCHAIN: nightly-2023-08-07
jobs:
apidoc:

.github/workflows/dev-build.yml
View File

@@ -0,0 +1,344 @@
# The development build only builds the debug version of the artifacts and is triggered manually.
name: GreptimeDB Development Build
on:
workflow_dispatch: # Allows you to run this workflow manually.
inputs:
repository:
description: The public repository to build
required: false
default: GreptimeTeam/greptimedb
commit: # Note: We only pull the source code and use the current workflow to build the artifacts.
description: The commit to build
required: true
linux_amd64_runner:
type: choice
description: The runner used to build linux-amd64 artifacts
default: ec2-c6i.4xlarge-amd64
options:
- ubuntu-latest
- ubuntu-latest-8-cores
- ubuntu-latest-16-cores
- ubuntu-latest-32-cores
- ubuntu-latest-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G
- ec2-c6i.8xlarge-amd64 # 32C64G
- ec2-c6i.16xlarge-amd64 # 64C128G
linux_arm64_runner:
type: choice
description: The runner used to build linux-arm64 artifacts
default: ec2-c6g.4xlarge-arm64
options:
- ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G
- ec2-c6g.4xlarge-arm64 # 16C32G
- ec2-c6g.8xlarge-arm64 # 32C64G
- ec2-c6g.16xlarge-arm64 # 64C128G
skip_test:
description: Do not run integration tests during the build
type: boolean
default: true
build_linux_amd64_artifacts:
type: boolean
description: Build linux-amd64 artifacts
required: false
default: true
build_linux_arm64_artifacts:
type: boolean
description: Build linux-arm64 artifacts
required: false
default: true
release_images:
type: boolean
description: Build and push images to DockerHub and ACR
required: false
default: true
# Use env variables to control all the release process.
env:
CARGO_PROFILE: nightly
# Controls whether to run tests, including unit tests, integration tests and sqlness.
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
# Always use 'dev' to indicate it's the dev build.
NEXT_RELEASE_VERSION: dev
NIGHTLY_RELEASE_PREFIX: nightly
# Use a different image name to avoid conflicts with the release images.
IMAGE_NAME: greptimedb-dev
# The source code will be checked out into the following path: '${WORKING_DIR}/dev/greptime'.
CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb
jobs:
allocate-runners:
name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest
outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
# The following EC2 resource IDs will be used when releasing the runners.
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-amd64-ec2-runner-instance-id: ${{ steps.start-linux-amd64-runner.outputs.ec2-instance-id }}
linux-arm64-ec2-runner-label: ${{ steps.start-linux-arm64-runner.outputs.label }}
linux-arm64-ec2-runner-instance-id: ${{ steps.start-linux-arm64-runner.outputs.ec2-instance-id }}
# The 'version' is used as the global tag name of the release workflow.
version: ${{ steps.create-version.outputs.version }}
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Create version
id: create-version
run: |
version=$(./.github/scripts/create-version.sh) && \
echo $version && \
echo "version=$version" >> $GITHUB_OUTPUT
env:
GITHUB_EVENT_NAME: ${{ github.event_name }}
GITHUB_REF_NAME: ${{ github.ref_name }}
COMMIT_SHA: ${{ inputs.commit }}
NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
- name: Allocate linux-amd64 runner
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
id: start-linux-amd64-runner
with:
runner: ${{ inputs.linux_amd64_runner || vars.DEFAULT_AMD64_RUNNER }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
image-id: ${{ vars.EC2_RUNNER_LINUX_AMD64_IMAGE_ID }}
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
- name: Allocate linux-arm64 runner
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
id: start-linux-arm64-runner
with:
runner: ${{ inputs.linux_arm64_runner || vars.DEFAULT_ARM64_RUNNER }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
image-id: ${{ vars.EC2_RUNNER_LINUX_ARM64_IMAGE_ID }}
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
build-linux-amd64-artifacts:
name: Build linux-amd64 artifacts
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
needs: [
allocate-runners,
]
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Checkout greptimedb
uses: actions/checkout@v3
with:
repository: ${{ inputs.repository }}
ref: ${{ inputs.commit }}
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
- uses: ./.github/actions/build-linux-artifacts
with:
arch: amd64
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: true # Only build the standard greptime binary.
upload-to-s3: false # No need to upload to S3.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
needs: [
allocate-runners,
]
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Checkout greptimedb
uses: actions/checkout@v3
with:
repository: ${{ inputs.repository }}
ref: ${{ inputs.commit }}
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
- uses: ./.github/actions/build-linux-artifacts
with:
arch: arm64
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: true # Only build the standard greptime binary.
upload-to-s3: false # No need to upload to S3.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
release-images-to-dockerhub:
name: Build and push images to DockerHub
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
]
runs-on: ubuntu-latest
outputs:
build-result: ${{ steps.set-build-result.outputs.build-result }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and push images to dockerhub
uses: ./.github/actions/build-images
with:
image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false # Don't push the latest tag to registry.
dev-mode: true # Only build the standard images.
- name: Set build result
id: set-build-result
run: |
echo "build-result=success" >> $GITHUB_OUTPUT
release-images-to-acr:
name: Build and push images to ACR
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
]
runs-on: ubuntu-latest
# When we push to ACR, it's easy to fail due to some unknown network issues.
# However, we don't want to fail the whole workflow because of this.
# The ACR has a daily sync with DockerHub, so don't worry about the image not being updated.
continue-on-error: true
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and push images to ACR
uses: ./.github/actions/build-images
with:
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false # Don't push the latest tag to registry.
dev-mode: true # Only build the standard images.
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-latest
needs: [
allocate-runners,
build-linux-amd64-artifacts,
]
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Stop EC2 runner
uses: ./.github/actions/stop-runner
with:
label: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-label }}
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-instance-id }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
stop-linux-arm64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-arm64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-latest
needs: [
allocate-runners,
build-linux-arm64-artifacts,
]
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Stop EC2 runner
uses: ./.github/actions/stop-runner
with:
label: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-label }}
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-instance-id }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification:
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub
]
runs-on: ubuntu-latest
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- name: Notify dev build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB ${{ env.NEXT_RELEASE_VERSION }} build successful"}
- name: Notify dev build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }}
with:
payload: |
{"text": "GreptimeDB ${{ env.NEXT_RELEASE_VERSION }} build failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'"}

View File

@@ -25,7 +25,7 @@ on:
name: CI
env:
RUST_TOOLCHAIN: nightly-2023-05-03
RUST_TOOLCHAIN: nightly-2023-08-07
jobs:
typos:
@@ -51,7 +51,7 @@ jobs:
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run cargo check
run: cargo check --workspace --all-targets
run: cargo check --locked --workspace --all-targets
toml:
name: Toml Check
@@ -62,62 +62,22 @@ jobs:
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
toolchain: stable
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Install taplo
run: cargo install taplo-cli --version ^0.8 --locked
run: cargo +stable install taplo-cli --version ^0.8 --locked
- name: Run taplo
run: taplo format --check --option "indent_string= "
# Use coverage to run tests.
# test:
# name: Test Suite
# if: github.event.pull_request.draft == false
# runs-on: ubuntu-latest
# timeout-minutes: 60
# steps:
# - uses: actions/checkout@v3
# - name: Cache LLVM and Clang
# id: cache-llvm
# uses: actions/cache@v3
# with:
# path: ./llvm
# key: llvm
# - uses: arduino/setup-protoc@v1
# with:
# repo-token: ${{ secrets.GITHUB_TOKEN }}
# - uses: KyleMayes/install-llvm-action@v1
# with:
# version: "14.0"
# cached: ${{ steps.cache-llvm.outputs.cache-hit }}
# - uses: dtolnay/rust-toolchain@master
# with:
# toolchain: ${{ env.RUST_TOOLCHAIN }}
# - name: Rust Cache
# uses: Swatinem/rust-cache@v2
# - name: Cleanup disk
# uses: curoky/cleanup-disk-action@v2.0
# with:
# retain: 'rust,llvm'
# - name: Install latest nextest release
# uses: taiki-e/install-action@nextest
# - name: Run tests
# run: cargo nextest run
# env:
# CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
# RUST_BACKTRACE: 1
# GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
# GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
# GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
# UNITTEST_LOG_DIR: "__unittest_logs"
run: taplo format --check
sqlness:
name: Sqlness Test
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest-8-cores
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-latest-8-cores, windows-latest-8-cores ]
timeout-minutes: 60
needs: [clippy]
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
@@ -128,25 +88,14 @@ jobs:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run etcd
run: |
ETCD_VER=v3.5.7
DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
mkdir -p /tmp/etcd-download
tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
nohup etcd >/tmp/etcd.log 2>&1 &
- name: Run sqlness
run: cargo sqlness && ls /tmp
run: cargo sqlness
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: /tmp/greptime-*.log
path: ${{ runner.temp }}/greptime-*.log
retention-days: 3
fmt:
@@ -191,7 +140,6 @@ jobs:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest-8-cores
timeout-minutes: 60
needs: [clippy]
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
@@ -236,3 +184,43 @@ jobs:
flags: rust
fail_ci_if_error: false
verbose: true
test-on-windows:
if: github.event.pull_request.draft == false
runs-on: windows-latest-8-cores
timeout-minutes: 60
steps:
- run: git config --global core.autocrlf false
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Install Cargo Nextest
uses: taiki-e/install-action@nextest
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow
- name: Install WSL distribution
uses: Vampire/setup-wsl@v2
with:
distribution: Ubuntu-22.04
- name: Running tests
run: cargo nextest run -F pyo3_backend,dashboard
env:
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
GT_S3_REGION: ${{ secrets.S3_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs"
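For local reproduction, the test steps above boil down to roughly the following commands (a sketch; the GT_S3_* values are placeholders for the repository secrets):
# Sqlness suite, as run by the 'Run sqlness' step.
cargo sqlness
# Unit/integration tests, as run on the Windows runner.
RUST_BACKTRACE=1 CARGO_INCREMENTAL=0 UNITTEST_LOG_DIR=__unittest_logs \
GT_S3_BUCKET=<bucket> GT_S3_ACCESS_KEY_ID=<key-id> GT_S3_ACCESS_KEY=<key> GT_S3_REGION=<region> \
cargo nextest run -F pyo3_backend,dashboard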

309
.github/workflows/nightly-build.yml vendored Normal file
View File

@@ -0,0 +1,309 @@
# The nightly build only does the following things:
# 1. Run integration tests;
# 2. Build binaries and images for linux-amd64 and linux-arm64 platform;
name: GreptimeDB Nightly Build
on:
schedule:
# Trigger at 00:00 (UTC) on every day-of-week from Monday through Friday.
- cron: '0 0 * * 1-5'
workflow_dispatch: # Allows you to run this workflow manually.
inputs:
linux_amd64_runner:
type: choice
description: The runner used to build linux-amd64 artifacts
default: ec2-c6i.2xlarge-amd64
options:
- ubuntu-latest
- ubuntu-latest-8-cores
- ubuntu-latest-16-cores
- ubuntu-latest-32-cores
- ubuntu-latest-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G
- ec2-c6i.8xlarge-amd64 # 32C64G
- ec2-c6i.16xlarge-amd64 # 64C128G
linux_arm64_runner:
type: choice
description: The runner used to build linux-arm64 artifacts
default: ec2-c6g.2xlarge-arm64
options:
- ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G
- ec2-c6g.4xlarge-arm64 # 16C32G
- ec2-c6g.8xlarge-arm64 # 32C64G
- ec2-c6g.16xlarge-arm64 # 64C128G
skip_test:
description: Do not run integration tests during the build
type: boolean
default: true
build_linux_amd64_artifacts:
type: boolean
description: Build linux-amd64 artifacts
required: false
default: false
build_linux_arm64_artifacts:
type: boolean
description: Build linux-arm64 artifacts
required: false
default: false
release_images:
type: boolean
description: Build and push images to DockerHub and ACR
required: false
default: false
# Use env variables to control the whole release process.
env:
CARGO_PROFILE: nightly
# Controls whether to run tests, including unit tests, integration tests, and sqlness.
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
# Always use 'nightly' to indicate it's the nightly build.
NEXT_RELEASE_VERSION: nightly
NIGHTLY_RELEASE_PREFIX: nightly
jobs:
allocate-runners:
name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest
outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
# The following EC2 resource IDs will be used when releasing the runners.
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-amd64-ec2-runner-instance-id: ${{ steps.start-linux-amd64-runner.outputs.ec2-instance-id }}
linux-arm64-ec2-runner-label: ${{ steps.start-linux-arm64-runner.outputs.label }}
linux-arm64-ec2-runner-instance-id: ${{ steps.start-linux-arm64-runner.outputs.ec2-instance-id }}
# The 'version' is used as the global tag name of the release workflow.
version: ${{ steps.create-version.outputs.version }}
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Create version
id: create-version
run: |
version=$(./.github/scripts/create-version.sh) && \
echo $version && \
echo "version=$version" >> $GITHUB_OUTPUT
env:
GITHUB_EVENT_NAME: ${{ github.event_name }}
GITHUB_REF_NAME: ${{ github.ref_name }}
NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
- name: Allocate linux-amd64 runner
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
id: start-linux-amd64-runner
with:
runner: ${{ inputs.linux_amd64_runner || vars.DEFAULT_AMD64_RUNNER }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
image-id: ${{ vars.EC2_RUNNER_LINUX_AMD64_IMAGE_ID }}
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
- name: Allocate linux-arm64 runner
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
id: start-linux-arm64-runner
with:
runner: ${{ inputs.linux_arm64_runner || vars.DEFAULT_ARM64_RUNNER }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
image-id: ${{ vars.EC2_RUNNER_LINUX_ARM64_IMAGE_ID }}
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
build-linux-amd64-artifacts:
name: Build linux-amd64 artifacts
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
needs: [
allocate-runners,
]
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./.github/actions/build-linux-artifacts
with:
arch: amd64
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
needs: [
allocate-runners,
]
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./.github/actions/build-linux-artifacts
with:
arch: arm64
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
release-images-to-dockerhub:
name: Build and push images to DockerHub
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
]
runs-on: ubuntu-latest
outputs:
nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and push images to dockerhub
uses: ./.github/actions/build-images
with:
image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false # Don't push the latest tag to registry.
- name: Set nightly build result
id: set-nightly-build-result
run: |
echo "nightly-build-result=success" >> $GITHUB_OUTPUT
release-images-to-acr:
name: Build and push images to ACR
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
]
runs-on: ubuntu-latest
# When we push to ACR, it's easy to fail due to some unknown network issues.
# However, we don't want to fail the whole workflow because of this.
# The ACR has a daily sync with DockerHub, so don't worry about the image not being updated.
continue-on-error: true
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and push images to ACR
uses: ./.github/actions/build-images
with:
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false # Don't push the latest tag to registry.
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-latest
needs: [
allocate-runners,
build-linux-amd64-artifacts,
]
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Stop EC2 runner
uses: ./.github/actions/stop-runner
with:
label: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-label }}
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-instance-id }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
stop-linux-arm64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-arm64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-latest
needs: [
allocate-runners,
build-linux-arm64-artifacts,
]
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Stop EC2 runner
uses: ./.github/actions/stop-runner
with:
label: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-label }}
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-instance-id }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification:
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub
]
runs-on: ubuntu-latest
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- name: Notify nightly build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB nightly build successful"}
- name: Notify nightly build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
with:
payload: |
{"text": "GreptimeDB nightly build failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/nightly-build.yml'"}

View File

@@ -1,3 +1,8 @@
name: Release
# There are two kinds of formal releases:
# 1. The tag ('v*.*.*') push release: the release workflow will be triggered by the tag push event.
# 2. The scheduled release (the version will be '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD'): the release workflow will be triggered by the schedule event.
on:
push:
tags:
@@ -5,522 +10,386 @@ on:
schedule:
# At 00:00 on Monday.
- cron: '0 0 * * 1'
# A manual trigger only builds binaries.
workflow_dispatch:
workflow_dispatch: # Allows you to run this workflow manually.
# Note: GitHub Actions ONLY supports 10 inputs, and they are already all used up.
inputs:
dry_run:
description: 'Skip docker push and release steps'
linux_amd64_runner:
type: choice
description: The runner used to build linux-amd64 artifacts
default: ec2-c6i.4xlarge-amd64
options:
- ubuntu-latest
- ubuntu-latest-8-cores
- ubuntu-latest-16-cores
- ubuntu-latest-32-cores
- ubuntu-latest-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G
- ec2-c6i.8xlarge-amd64 # 32C64G
- ec2-c6i.16xlarge-amd64 # 64C128G
linux_arm64_runner:
type: choice
description: The runner used to build linux-arm64 artifacts
default: ec2-c6g.4xlarge-arm64
options:
- ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G
- ec2-c6g.4xlarge-arm64 # 16C32G
- ec2-c6g.8xlarge-arm64 # 32C64G
- ec2-c6g.16xlarge-arm64 # 64C128G
macos_runner:
type: choice
description: The runner used to build macOS artifacts
default: macos-latest
options:
- macos-latest
skip_test:
description: Do not run integration tests during the build
type: boolean
default: true
skip_test:
description: 'Do not run tests during build'
build_linux_amd64_artifacts:
type: boolean
description: Build linux-amd64 artifacts
required: false
default: false
build_linux_arm64_artifacts:
type: boolean
description: Build linux-arm64 artifacts
required: false
default: false
build_macos_artifacts:
type: boolean
description: Build macos artifacts
required: false
default: false
release_artifacts:
type: boolean
description: Create GitHub release and upload artifacts
required: false
default: false
release_images:
type: boolean
description: Build and push images to DockerHub and ACR
required: false
default: false
release_dev_builder_image:
type: boolean
description: Release dev-builder image
required: false
default: false
name: Release
# Use env variables to control the whole release process.
env:
RUST_TOOLCHAIN: nightly-2023-05-03
SCHEDULED_BUILD_VERSION_PREFIX: v0.4.0
SCHEDULED_PERIOD: nightly
# The arguments for building greptime.
RUST_TOOLCHAIN: nightly-2023-08-07
CARGO_PROFILE: nightly
# Controls whether to run tests, including unit tests, integration tests, and sqlness.
DISABLE_RUN_TESTS: ${{ inputs.skip_test || false }}
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be updated manually for every formal release.
NEXT_RELEASE_VERSION: v0.4.0
jobs:
build-macos:
name: Build macOS binary
allocate-runners:
name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest
outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
# The following EC2 resource IDs will be used when releasing the runners.
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-amd64-ec2-runner-instance-id: ${{ steps.start-linux-amd64-runner.outputs.ec2-instance-id }}
linux-arm64-ec2-runner-label: ${{ steps.start-linux-arm64-runner.outputs.label }}
linux-arm64-ec2-runner-instance-id: ${{ steps.start-linux-arm64-runner.outputs.ec2-instance-id }}
# The 'version' is used as the global tag name of the release workflow.
version: ${{ steps.create-version.outputs.version }}
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
# The create-version step creates a global variable named 'version' that is used across the whole workflow.
# - If it's a tag push release, the version is the tag name (${{ github.ref_name }});
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313;
# - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
- name: Create version
id: create-version
run: |
echo "version=$(./.github/scripts/create-version.sh)" >> $GITHUB_OUTPUT
env:
GITHUB_EVENT_NAME: ${{ github.event_name }}
GITHUB_REF_NAME: ${{ github.ref_name }}
NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
- name: Allocate linux-amd64 runner
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
id: start-linux-amd64-runner
with:
runner: ${{ inputs.linux_amd64_runner || vars.DEFAULT_AMD64_RUNNER }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
image-id: ${{ vars.EC2_RUNNER_LINUX_AMD64_IMAGE_ID }}
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
- name: Allocate linux-arm64 runner
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
id: start-linux-arm64-runner
with:
runner: ${{ inputs.linux_arm64_runner || vars.DEFAULT_ARM64_RUNNER }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
image-id: ${{ vars.EC2_RUNNER_LINUX_ARM64_IMAGE_ID }}
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
build-linux-amd64-artifacts:
name: Build linux-amd64 artifacts
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
]
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./.github/actions/build-linux-artifacts
with:
arch: amd64
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
]
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./.github/actions/build-linux-artifacts
with:
arch: arm64
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
build-macos-artifacts:
name: Build macOS artifacts
strategy:
fail-fast: false
matrix:
# The file format is greptime-<os>-<arch>
include:
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
arch: aarch64-apple-darwin
features: servers/dashboard
artifacts-dir-prefix: greptime-darwin-arm64
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
arch: aarch64-apple-darwin
features: pyo3_backend,servers/dashboard
artifacts-dir-prefix: greptime-darwin-arm64-pyo3
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
features: servers/dashboard
arch: x86_64-apple-darwin
artifacts-dir-prefix: greptime-darwin-amd64
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
features: pyo3_backend,servers/dashboard
arch: x86_64-apple-darwin
artifacts-dir-prefix: greptime-darwin-amd64-pyo3
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.continue-on-error }}
if: github.repository == 'GreptimeTeam/greptimedb'
needs: [
allocate-runners,
]
if: ${{ inputs.build_macos_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Cache cargo assets
id: cache
uses: actions/cache@v3
- uses: actions/checkout@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ matrix.arch }}-build-cargo-${{ hashFiles('**/Cargo.lock') }}
fetch-depth: 0
- name: Install Protoc for macos
if: contains(matrix.arch, 'darwin')
run: |
brew install protobuf
- name: Install etcd for macos
if: contains(matrix.arch, 'darwin')
run: |
brew install etcd
brew services start etcd
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
- uses: ./.github/actions/build-macos-artifacts
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
targets: ${{ matrix.arch }}
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Output package versions
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
arch: ${{ matrix.arch }}
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
cargo-profile: ${{ env.CARGO_PROFILE }}
features: ${{ matrix.features }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
- name: Run tests
if: env.DISABLE_RUN_TESTS == 'false'
run: make test sqlness-test
- name: Run cargo build
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Calculate checksum and rename binary
shell: bash
run: |
cd target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}
chmod +x greptime
tar -zcvf ${{ matrix.file }}.tgz greptime
echo $(shasum -a 256 ${{ matrix.file }}.tgz | cut -f1 -d' ') > ${{ matrix.file }}.sha256sum
- name: Upload artifacts
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.file }}
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.tgz
- name: Upload checksum of artifacts
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.file }}.sha256sum
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name != 'push'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
- name: Configure tag
shell: bash
if: github.event_name == 'push'
run: |
VERSION=${{ github.ref_name }}
echo "TAG=${VERSION:1}" >> $GITHUB_ENV
- name: Upload to S3
run: |
aws s3 cp target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }} s3://${{ secrets.GREPTIMEDB_RELEASE_BUCKET_NAME }}/releases/${TAG} --recursive --exclude "*" --include "*.tgz"
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: ${{ secrets.AWS_CN_REGION }}
build-linux:
name: Build linux binary
strategy:
fail-fast: false
matrix:
# The file format is greptime-<os>-<arch>
include:
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.continue-on-error }}
if: github.repository == 'GreptimeTeam/greptimedb'
release-images-to-dockerhub:
name: Build and push images to DockerHub
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
]
runs-on: ubuntu-2004-16-cores
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Cache cargo assets
id: cache
uses: actions/cache@v3
- uses: actions/checkout@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ matrix.arch }}-build-cargo-${{ hashFiles('**/Cargo.lock') }}
fetch-depth: 0
- name: Install Protoc for linux
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
run: | # Make sure the protoc is >= 3.15
wget https://github.com/protocolbuffers/protobuf/releases/download/v21.9/protoc-21.9-linux-x86_64.zip
unzip protoc-21.9-linux-x86_64.zip -d protoc
sudo cp protoc/bin/protoc /usr/local/bin/
sudo cp -r protoc/include/google /usr/local/include/
- name: Install etcd for linux
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
run: |
ETCD_VER=v3.5.7
DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
mkdir -p /tmp/etcd-download
tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
nohup etcd >/tmp/etcd.log 2>&1 &
- name: Install dependencies for linux
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
run: |
sudo apt-get -y update
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu wget
# FIXME(zyy17): Should we specify the version of python when building binary for darwin?
- name: Compile Python 3.10.10 from source for linux
if: contains(matrix.arch, 'linux') && contains(matrix.opts, 'pyo3_backend')
run: |
sudo chmod +x ./docker/aarch64/compile-python.sh
sudo ./docker/aarch64/compile-python.sh ${{ matrix.arch }}
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
- name: Build and push images to dockerhub
uses: ./.github/actions/build-images
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
targets: ${{ matrix.arch }}
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Output package versions
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }}
- name: Run tests
if: env.DISABLE_RUN_TESTS == 'false'
run: make test sqlness-test
- name: Run cargo build
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Run cargo build with pyo3 for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
# TODO(zyy17): We should make PYO3_CROSS_LIB_DIR configurable.
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
export PYO3_CROSS_LIB_DIR=${PWD}/python-3.10.10/aarch64
echo "PYO3_CROSS_LIB_DIR: $PYO3_CROSS_LIB_DIR"
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Run cargo build with pyo3 for amd64-linux
if: contains(matrix.arch, 'x86_64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
echo "implementation=CPython" >> pyo3.config
echo "version=3.10" >> pyo3.config
echo "implementation=CPython" >> pyo3.config
echo "shared=true" >> pyo3.config
echo "abi3=true" >> pyo3.config
echo "lib_name=python3.10" >> pyo3.config
echo "lib_dir=$PYTHON_INSTALL_PATH_AMD64/lib" >> pyo3.config
echo "executable=$PYTHON_INSTALL_PATH_AMD64/bin/python3" >> pyo3.config
echo "pointer_width=64" >> pyo3.config
echo "build_flags=" >> pyo3.config
echo "suppress_build_script_link_lines=false" >> pyo3.config
cat pyo3.config
export PYO3_CONFIG_FILE=${PWD}/pyo3.config
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Calculate checksum and rename binary
shell: bash
run: |
cd target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}
chmod +x greptime
tar -zcvf ${{ matrix.file }}.tgz greptime
echo $(shasum -a 256 ${{ matrix.file }}.tgz | cut -f1 -d' ') > ${{ matrix.file }}.sha256sum
- name: Upload artifacts
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.file }}
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.tgz
- name: Upload checksum of artifacts
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.file }}.sha256sum
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name != 'push'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
- name: Configure tag
shell: bash
if: github.event_name == 'push'
run: |
VERSION=${{ github.ref_name }}
echo "TAG=${VERSION:1}" >> $GITHUB_ENV
- name: Upload to S3
run: |
aws s3 cp target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }} s3://${{ secrets.GREPTIMEDB_RELEASE_BUCKET_NAME }}/releases/${TAG} --recursive --exclude "*" --include "*.tgz"
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: ${{ secrets.AWS_CN_REGION }}
docker:
name: Build docker image
needs: [build-linux, build-macos]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name != 'push'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
if: github.event_name == 'push'
run: |
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up buildx
uses: docker/setup-buildx-action@v2
- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-amd64-pyo3
path: amd64
- name: Unzip the amd64 artifacts
run: |
tar xvf amd64/greptime-linux-amd64-pyo3.tgz -C amd64/ && rm amd64/greptime-linux-amd64-pyo3.tgz
cp -r amd64 docker/ci
- name: Download arm64 binary
id: download-arm64
uses: actions/download-artifact@v3
with:
name: greptime-linux-arm64-pyo3
path: arm64
- name: Unzip the arm64 artifacts
id: unzip-arm64
if: success() || steps.download-arm64.conclusion == 'success'
run: |
tar xvf arm64/greptime-linux-arm64-pyo3.tgz -C arm64/ && rm arm64/greptime-linux-arm64-pyo3.tgz
cp -r arm64 docker/ci
- name: Build and push all
uses: docker/build-push-action@v3
if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
with:
context: .
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64,linux/arm64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
- name: Build and push amd64 only
uses: docker/build-push-action@v3
if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
with:
context: .
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
release:
name: Release artifacts
# Release artifacts only when all the artifacts are built successfully.
needs: [build-linux, build-macos, docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Download artifacts
uses: actions/download-artifact@v3
- name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-${SCHEDULED_PERIOD}-YYYYMMDD, like v0.2.0-nightly-20230313.
shell: bash
if: github.event_name != 'push'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-${{ env.SCHEDULED_PERIOD }}-$buildTime
echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV
# Only publish release when the release tag is like v1.0.0, v1.0.1, v1.0.2, etc.
- name: Set whether it is the latest release
run: |
if [[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "prerelease=false" >> $GITHUB_ENV
echo "makeLatest=true" >> $GITHUB_ENV
else
echo "prerelease=true" >> $GITHUB_ENV
echo "makeLatest=false" >> $GITHUB_ENV
fi
- name: Create scheduled build git tag
if: github.event_name != 'push'
run: |
git tag ${{ env.SCHEDULED_BUILD_VERSION }}
- name: Publish scheduled release # configures a different release title and tag.
uses: ncipollo/release-action@v1
if: github.event_name != 'push'
with:
name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
prerelease: ${{ env.prerelease }}
makeLatest: ${{ env.makeLatest }}
tag: ${{ env.SCHEDULED_BUILD_VERSION }}
generateReleaseNotes: true
artifacts: |
**/greptime-*
- name: Publish release
uses: ncipollo/release-action@v1
if: github.event_name == 'push'
with:
name: "${{ github.ref_name }}"
prerelease: ${{ env.prerelease }}
makeLatest: ${{ env.makeLatest }}
generateReleaseNotes: false
allowUpdates: true
artifacts: |
**/greptime-*
docker-push-acr:
name: Push docker image to alibaba cloud container registry
needs: [docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
release-images-to-acr:
name: Build and push images to ACR
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
]
runs-on: ubuntu-2004-16-cores
# When we push to ACR, it's easy to fail due to some unknown network issues.
# However, we don't want to fail the whole workflow because of this.
# The ACR has a daily sync with DockerHub, so don't worry about the image not being updated.
continue-on-error: true
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to alibaba cloud container registry
uses: docker/login-action@v2
- uses: actions/checkout@v3
with:
registry: greptime-registry.cn-hangzhou.cr.aliyuncs.com
username: ${{ secrets.ALICLOUD_USERNAME }}
password: ${{ secrets.ALICLOUD_PASSWORD }}
fetch-depth: 0
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name != 'push'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
- name: Build and push images to ACR
uses: ./.github/actions/build-images
with:
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
version: ${{ needs.allocate-runners.outputs.version }}
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
if: github.event_name == 'push'
run: |
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
release-artifacts:
name: Create GitHub release and upload artifacts
if: ${{ inputs.release_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
build-macos-artifacts,
release-images-to-dockerhub,
]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Push image to alibaba cloud container registry # Use 'docker buildx imagetools create' to create a new image based on the source image.
run: |
docker buildx imagetools create \
--tag greptime-registry.cn-hangzhou.cr.aliyuncs.com/greptime/greptimedb:latest \
--tag greptime-registry.cn-hangzhou.cr.aliyuncs.com/greptime/greptimedb:${{ env.IMAGE_TAG }} \
greptime/greptimedb:${{ env.IMAGE_TAG }}
- name: Release artifacts
uses: ./.github/actions/release-artifacts
with:
version: ${{ needs.allocate-runners.outputs.version }}
release-dev-builder-image:
name: Release dev builder image
if: ${{ inputs.release_dev_builder_image }} # Only manually trigger this job.
runs-on: ubuntu-latest-16-cores
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and push dev builder image
uses: ./.github/actions/build-dev-builder-image
with:
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
acr-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
acr-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
acr-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
### Stop runners ###
# It's necessary to split the runner-releasing work into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner',
# because we can then terminate the specified EC2 instance immediately after its build job finishes, without unnecessary waiting.
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-latest
needs: [
allocate-runners,
build-linux-amd64-artifacts,
]
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Stop EC2 runner
uses: ./.github/actions/stop-runner
with:
label: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-label }}
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-instance-id }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
stop-linux-arm64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-arm64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-latest
needs: [
allocate-runners,
build-linux-arm64-artifacts,
]
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Stop EC2 runner
uses: ./.github/actions/stop-runner
with:
label: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-label }}
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-instance-id }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
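Two shell idioms used throughout this workflow are worth spelling out: image tags drop the leading 'v' of the git tag, and only plain 'vX.Y.Z' tags are published as the latest release. A minimal sketch:
VERSION=v0.4.0                           # e.g. github.ref_name on a tag push
echo "IMAGE_TAG=${VERSION:1}"            # strips the leading 'v' -> IMAGE_TAG=0.4.0
if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
  echo "prerelease=false" && echo "makeLatest=true"    # formal release, e.g. v1.0.0
else
  echo "prerelease=true" && echo "makeLatest=false"    # nightly or other pre-release tags
fi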

1803
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -11,13 +11,13 @@ members = [
"src/common/error",
"src/common/function",
"src/common/function-macro",
"src/common/greptimedb-telemetry",
"src/common/grpc",
"src/common/grpc-expr",
"src/common/mem-prof",
"src/common/meta",
"src/common/procedure",
"src/common/procedure-test",
"src/common/pprof",
"src/common/query",
"src/common/recordbatch",
"src/common/runtime",
@@ -25,6 +25,7 @@ members = [
"src/common/telemetry",
"src/common/test-util",
"src/common/time",
"src/common/version",
"src/datanode",
"src/datatypes",
"src/file-table-engine",
@@ -49,6 +50,7 @@ members = [
"tests-integration",
"tests/runner",
]
resolver = "2"
[workspace.package]
version = "0.3.2"
@@ -56,30 +58,30 @@ edition = "2021"
license = "Apache-2.0"
[workspace.dependencies]
arrow = { version = "40.0" }
arrow-array = "40.0"
arrow-flight = "40.0"
arrow-schema = { version = "40.0", features = ["serde"] }
arrow = { version = "43.0" }
etcd-client = "0.11"
arrow-array = "43.0"
arrow-flight = "43.0"
arrow-schema = { version = "43.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
# TODO(ruihang): use arrow-datafusion when it contains https://github.com/apache/arrow-datafusion/pull/6032
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
etcd-client = "0.11"
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "2ceb7f927c40787773fdc466d6a4b79f3a6c0001" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "2ceb7f927c40787773fdc466d6a4b79f3a6c0001" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "2ceb7f927c40787773fdc466d6a4b79f3a6c0001" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "2ceb7f927c40787773fdc466d6a4b79f3a6c0001" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "2ceb7f927c40787773fdc466d6a4b79f3a6c0001" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "2ceb7f927c40787773fdc466d6a4b79f3a6c0001" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "2ceb7f927c40787773fdc466d6a4b79f3a6c0001" }
derive_builder = "0.12"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "356694a72f12ad9e15008d4245a0b4fe48f982ad" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "940694cfd05c1e93c1dd7aab486184c9e2853098" }
itertools = "0.10"
lazy_static = "1.4"
once_cell = "1.18"
opentelemetry-proto = { version = "0.2", features = ["gen-tonic", "metrics"] }
parquet = "40.0"
parquet = "43.0"
paste = "1.0"
prost = "0.11"
rand = "0.8"
@@ -87,7 +89,9 @@ regex = "1.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = "0.34"
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "c3814f08afa19786b13d72b1731a1e8b3cac4ab9", features = [
"visitor",
] }
tempfile = "3"
tokio = { version = "1.28", features = ["full"] }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
@@ -96,6 +100,54 @@ tonic = { version = "0.9", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
metrics = "0.20"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
## workspaces members
api = { path = "src/api" }
catalog = { path = "src/catalog" }
client = { path = "src/client" }
cmd = { path = "src/cmd" }
common-base = { path = "src/common/base" }
common-catalog = { path = "src/common/catalog" }
common-datasource = { path = "src/common/datasource" }
common-error = { path = "src/common/error" }
common-function = { path = "src/common/function" }
common-function-macro = { path = "src/common/function-macro" }
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
common-grpc = { path = "src/common/grpc" }
common-grpc-expr = { path = "src/common/grpc-expr" }
common-mem-prof = { path = "src/common/mem-prof" }
common-meta = { path = "src/common/meta" }
common-procedure = { path = "src/common/procedure" }
common-procedure-test = { path = "src/common/procedure-test" }
common-pprof = { path = "src/common/pprof" }
common-query = { path = "src/common/query" }
common-recordbatch = { path = "src/common/recordbatch" }
common-runtime = { path = "src/common/runtime" }
substrait = { path = "src/common/substrait" }
common-telemetry = { path = "src/common/telemetry" }
common-test-util = { path = "src/common/test-util" }
common-time = { path = "src/common/time" }
common-version = { path = "src/common/version" }
datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" }
file-table-engine = { path = "src/file-table-engine" }
frontend = { path = "src/frontend" }
log-store = { path = "src/log-store" }
meta-client = { path = "src/meta-client" }
meta-srv = { path = "src/meta-srv" }
mito = { path = "src/mito" }
mito2 = { path = "src/mito2" }
object-store = { path = "src/object-store" }
partition = { path = "src/partition" }
promql = { path = "src/promql" }
query = { path = "src/query" }
script = { path = "src/script" }
servers = { path = "src/servers" }
session = { path = "src/session" }
sql = { path = "src/sql" }
storage = { path = "src/storage" }
store-api = { path = "src/store-api" }
table = { path = "src/table" }
table-procedure = { path = "src/table-procedure" }
[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"

View File

@@ -2,6 +2,7 @@
CARGO_PROFILE ?=
FEATURES ?=
TARGET_DIR ?=
TARGET ?=
CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
@@ -16,23 +17,32 @@ CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
ETCD_VERSION ?= v3.5.9
ETCD_IMAGE ?= quay.io/coreos/etcd:${ETCD_VERSION}
RETRY_COUNT ?= 3
BUILD_JOBS ?= $(shell expr $$(nproc) / 2)
NEXTEST_OPTS := --retries ${RETRY_COUNT}
BUILD_JOBS ?= $(shell which nproc 1>/dev/null && expr $$(nproc) / 2) # If nproc is not available, we don't set the build jobs.
ifeq ($(BUILD_JOBS), 0) # If the number of cores is less than 2, set the build jobs to 1.
BUILD_JOBS := 1
endif
ifdef CARGO_PROFILE
ifneq ($(strip $(BUILD_JOBS)),)
NEXTEST_OPTS += --build-jobs=${BUILD_JOBS}
endif
ifneq ($(strip $(CARGO_PROFILE)),)
CARGO_BUILD_OPTS += --profile ${CARGO_PROFILE}
endif
ifdef FEATURES
ifneq ($(strip $(FEATURES)),)
CARGO_BUILD_OPTS += --features ${FEATURES}
endif
ifdef TARGET_DIR
ifneq ($(strip $(TARGET_DIR)),)
CARGO_BUILD_OPTS += --target-dir ${TARGET_DIR}
endif
ifneq ($(strip $(TARGET)),)
CARGO_BUILD_OPTS += --target ${TARGET}
endif
ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), true)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
else
@@ -73,11 +83,11 @@ fmt: ## Format all the Rust code.
.PHONY: fmt-toml
fmt-toml: ## Format all TOML files.
taplo format --option "indent_string= "
taplo format
.PHONY: check-toml
check-toml: ## Check all TOML files.
taplo format --check --option "indent_string= "
taplo format --check
.PHONY: docker-image
docker-image: multi-platform-buildx ## Build docker image.
@@ -111,7 +121,7 @@ multi-platform-buildx: ## Create buildx multi-platform builder.
##@ Test
test: nextest ## Run unit and integration tests.
cargo nextest run --retries ${RETRY_COUNT} --build-jobs=${BUILD_JOBS}
cargo nextest run ${NEXTEST_OPTS}
.PHONY: nextest ## Install nextest tools.
nextest:
@@ -127,7 +137,7 @@ check: ## Cargo check all the targets.
.PHONY: clippy
clippy: ## Check clippy rules.
cargo clippy --workspace --all-targets -- -D warnings
cargo clippy --workspace --all-targets -F pyo3_backend -- -D warnings
.PHONY: fmt-check
fmt-check: ## Check code format.

View File

@@ -129,8 +129,12 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c
### SDK
- [GreptimeDB Java
Client](https://github.com/GreptimeTeam/greptimedb-client-java)
- [GreptimeDB C++ Client](https://github.com/GreptimeTeam/greptimedb-client-cpp)
- [GreptimeDB Erlang Client](https://github.com/GreptimeTeam/greptimedb-client-erl)
- [GreptimeDB Go Client](https://github.com/GreptimeTeam/greptimedb-client-go)
- [GreptimeDB Java Client](https://github.com/GreptimeTeam/greptimedb-client-java)
- [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
- [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)
## Project Status

View File

@@ -7,7 +7,7 @@ license.workspace = true
[dependencies]
arrow.workspace = true
clap = { version = "4.0", features = ["derive"] }
client = { path = "../src/client" }
client = { workspace = true }
indicatif = "0.17.1"
itertools.workspace = true
parquet.workspace = true

View File

@@ -38,8 +38,9 @@ sync_write = false
# Storage options, see `standalone.example.toml`.
[storage]
type = "File"
# The working home directory.
data_home = "/tmp/greptimedb/"
type = "File"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"

View File

@@ -70,3 +70,11 @@ tcp_nodelay = true
# [logging]
# dir = "/tmp/greptimedb/logs"
# level = "info"
# Datanode options.
[datanode]
# Datanode client options.
[datanode.client]
timeout = "10s"
connect_timeout = "10s"
tcp_nodelay = true

View File

@@ -1,3 +1,5 @@
# The working home directory.
data_home = "/tmp/metasrv/"
# The bind address of metasrv, "127.0.0.1:3002" by default.
bind_addr = "127.0.0.1:3002"
# The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
@@ -13,6 +15,8 @@ datanode_lease_secs = 15
selector = "LeaseBased"
# Store data in memory, false by default.
use_memory_store = false
# Whether to enable greptimedb telemetry, true by default.
enable_telemetry = true
# Log options, see `standalone.example.toml`
# [logging]

View File

@@ -2,6 +2,8 @@
mode = "standalone"
# Whether to use in-memory catalog, `false` by default.
enable_memory_catalog = false
# Whether to enable greptimedb telemetry, true by default.
enable_telemetry = true
# HTTP server options.
[http_options]
@@ -96,10 +98,10 @@ sync_write = false
# Storage options.
[storage]
# The working home directory.
data_home = "/tmp/greptimedb/"
# Storage type.
type = "File"
# Data directory, "/tmp/greptimedb/data" by default.
data_home = "/tmp/greptimedb/"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"

View File

@@ -26,7 +26,7 @@ ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
# Build the project in release mode.
RUN --mount=target=.,rw \
--mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/root/.cargo/registry \
make build \
CARGO_PROFILE=${CARGO_PROFILE} \
FEATURES=${FEATURES} \

View File

@@ -7,7 +7,8 @@ ENV LANG en_US.utf8
WORKDIR /greptimedb
# Install dependencies.
RUN apt-get update && apt-get install -y \
RUN --mount=type=cache,target=/var/cache/apt \
apt-get update && apt-get install -y \
libssl-dev \
protobuf-compiler \
curl \
@@ -25,7 +26,7 @@ ENV PATH /root/.cargo/bin/:$PATH
# Build the project in release mode.
RUN --mount=target=.,rw \
--mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/root/.cargo/registry \
make build \
CARGO_PROFILE=${CARGO_PROFILE} \
FEATURES=${FEATURES} \

View File

@@ -0,0 +1,202 @@
---
Feature Name: metric-engine
Tracking Issue: TBD
Date: 2023-07-10
Author: "Ruihang Xia <waynestxia@gmail.com>"
---
# Summary
A new metric engine that can significantly enhance our ability to handle the tremendous number of small tables in scenarios like Prometheus metrics, by leveraging a synthetic wide table that offers storage and metadata multiplexing capabilities over the existing engine.
# Motivation
The concept "Table" in GreptimeDB is a bit "heavy" compared to other time-series storage like Prometheus or VictoriaMetrics. This has lots of disadvantages in aspects from performance, footprint, and storage to cost.
# Details
## Top level description
- User Interface
This feature will add a new type of storage engine. It might be exposed as an engine option (like `with ENGINE=mito`) or through an internal interface, such as automatic table creation on Prometheus remote write. From the user's side, there is no difference from tables in the mito engine: DDL like `CREATE` and `ALTER` and DML like `SELECT` should all be supported.
- Implementation Overlook
This new engine doesn't re-implement low-level components like file R/W. It's a wrapper layer over the existing mito engine, with extra storage and metadata multiplexing capabilities. I.e., it exposes multiple tables based on one mito engine table, like this:
``` plaintext
┌───────────────┐ ┌───────────────┐ ┌───────────────┐
│ Metric Engine │ │ Metric Engine │ │ Metric Engine │
│ Table 1 │ │ Table 2 │ │ Table 3 │
└───────────────┘ └───────────────┘ └───────────────┘
▲ ▲ ▲
│ │ │
└───────────────┼───────────────────┘
┌─────────┴────────┐
│ Metric Region │
│ Engine │
│ ┌─────────────┤
│ │ Mito Region │
│ │ Engine │
└────▲─────────────┘
┌─────┴───────────────┐
│ │
│ Mito Engine Table │
│ │
└─────────────────────┘
```
The following parts will describe these implementation details:
- How to route these metric engine tables and how those tables are distributed
- How to maintain the schema and other metadata of the underlying mito engine table
- How to maintain the schema of metric engine table
- How the query goes
## Routing
Before this change, the region route rule was based on a group of partition keys. The relation of a physical table to regions is one-to-many.
``` rust
pub struct PartitionDef {
partition_columns: Vec<String>,
partition_bounds: Vec<PartitionBound>,
}
```
For metric engine tables, the key difference is that we split the concepts of "physical table" and "logical table". As in the previous ASCII chart, multiple logical tables are based on one physical table, so the relationship of logical table to region becomes many-to-many. Thus, we must include the (logical) table name in the partition rules.
Considering that the partition/route interface is a generic map from a string array to a region id, all we need to do is insert the logical table name into the request:
``` rust
fn route(request: Vec<String>) -> RegionId;
```
The next question is where to do this conversion. The basic idea is to dispatch different routing behaviors based on the engine type. Since the frontend has all the necessary information, it's a good place to do that, and it leaves the meta server untouched. The essential change is to associate the engine type with the route rule.
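As a rough sketch of that dispatch, the snippet below (with an illustrative `EngineType` enum and a plain string-array route key, not the actual frontend types) appends the logical table name only for metric engine tables:
```rust
// A minimal sketch, assuming illustrative stand-in types. Only the idea of
// appending the logical table name to the route key comes from this RFC.
#[derive(PartialEq)]
enum EngineType {
    Mito,
    Metric,
}

/// Builds the string-array route request described above. For metric engine
/// tables the logical table name becomes part of the key.
fn build_route_key(
    engine: &EngineType,
    logical_table: &str,
    partition_values: Vec<String>,
) -> Vec<String> {
    let mut key = partition_values;
    if *engine == EngineType::Metric {
        key.push(logical_table.to_string());
    }
    key
}

fn main() {
    let key = build_route_key(&EngineType::Metric, "cpu_usage", vec!["host-1".into()]);
    assert_eq!(key, vec!["host-1".to_string(), "cpu_usage".to_string()]);
    // Mito tables keep the plain partition key.
    assert_eq!(
        build_route_key(&EngineType::Mito, "cpu_usage", vec!["host-1".into()]),
        vec!["host-1".to_string()]
    );
}
```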
## Physical Region Schema
The idea "physical wide table" is to perform column-level multiplexing. I.e., map all logical columns to physical columns by their names.
```
┌────────────┐ ┌────────────┐ ┌────────────┐
│ Table 1 │ │ Table 2 │ │ Table 3 │
├───┬────┬───┤ ├───┬────┬───┤ ├───┬────┬───┤
│C1 │ C2 │ C3│ │C1 │ C3 │ C5├──────┐ │C2 │ C4 │ C6│
└─┬─┴──┬─┴─┬─┘ ┌────┴───┴──┬─┴───┘ │ └─┬─┴──┬─┴─┬─┘
│ │ │ │ │ │ │ │ │
│ │ │ │ └──────────┐ │ │ │ │
│ │ │ │ │ │ │ │ │
│ │ │ │ ┌─────────────────┐ │ │ │ │ │
│ │ │ │ │ Physical Table │ │ │ │ │ │
│ │ │ │ ├──┬──┬──┬──┬──┬──┘ │ │ │ │ │
└────x───x───┴─►│C1│C2│C3│C4│C5│C6◄─┼─x────x────x───┘
│ │ └──┘▲─┘▲─┴─▲└─▲└──┘ │ │ │ │
│ │ │ │ │ │ │ │ │ │
├───x──────────┘ ├───x──x─────┘ │ │ │
│ │ │ │ │ │ │ │
│ └─────────────┘ │ └───────┘ │ │
│ │ │ │
└─────────────────────x───────────────┘ │
│ │
└────────────────────┘
```
This approach is very straightforward but has one problem: it breaks down when two columns share the same name but have different semantic types (time index, tag or field) or data types, e.g., `CREATE TABLE t1 (c1 timestamp(3) TIME INDEX)` and `CREATE TABLE t2 (c1 STRING PRIMARY KEY)`.
One possible workaround is to prefix each column with its data type and semantic type, like `_STRING_PK_c1`. However, considering the primary goal at present is to support data from monitoring metrics like Prometheus remote write, it's acceptable not to support this at first because data types are often simple and limited here.
The next point is changing the physical table's schema. This is only needed when creating a new logical table or altering an existing one. Typically, table creation and alteration are explicit: we only need to emit an add-column request to the underlying physical table when processing the logical table's DDL. GreptimeDB can create or alter tables automatically for some protocols, but the internal logic is the same.
Also, for simplicity, we don't support shrinking the underlying table at first. This can later be achieved by introducing a mechanism on the physical columns.
The frontend does not need to keep the physical table's schema.
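As an illustration of that add-column flow, here is a minimal sketch with hypothetical `ColumnDef`/`AddColumnRequest` stand-ins (not GreptimeDB's real types): when a logical table is created or altered, only the columns the physical table doesn't already have are turned into add-column requests.
```rust
// A minimal sketch, assuming hypothetical stand-in types.
use std::collections::HashSet;

#[derive(Clone, Debug)]
struct ColumnDef {
    name: String,
    data_type: String,     // simplified; the real engine carries concrete types
    semantic_type: String, // "timestamp" | "tag" | "field"
}

#[derive(Debug)]
struct AddColumnRequest {
    column: ColumnDef,
}

/// Diffs the logical table's columns against the physical table's known
/// column names and returns the add-column requests to emit.
fn columns_to_add(
    physical_columns: &HashSet<String>,
    logical_columns: &[ColumnDef],
) -> Vec<AddColumnRequest> {
    logical_columns
        .iter()
        .filter(|c| !physical_columns.contains(&c.name))
        .map(|c| AddColumnRequest { column: c.clone() })
        .collect()
}

fn main() {
    let physical: HashSet<String> = ["ts", "host"].iter().map(|s| s.to_string()).collect();
    let logical = vec![
        ColumnDef {
            name: "ts".to_string(),
            data_type: "timestamp".to_string(),
            semantic_type: "timestamp".to_string(),
        },
        ColumnDef {
            name: "cpu".to_string(),
            data_type: "float64".to_string(),
            semantic_type: "field".to_string(),
        },
    ];
    // Only `cpu` is new to the physical table, so exactly one request is emitted.
    assert_eq!(columns_to_add(&physical, &logical).len(), 1);
}
```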
## Metadata of physical regions
Metric engine regions need to store extra metadata, like the schemas and names of all the logical tables. That information is relatively simple and can be stored as key-value pairs. For now, we have to use another physical mito region for the metadata. This involves an issue with region scheduling: since we don't have the ability to perform affinity scheduling yet, the initial version will just assume the data region and the metadata region are on the same instance. See the alternative "Other storage for physical region's metadata" for a possible future improvement.
Here is the schema of the metadata region and how we would use it. The `CREATE TABLE` clause of the metadata region looks like the following. Note that it wouldn't actually be created via SQL.
``` sql
CREATE TABLE metadata(
ts timestamp time index,
key string primary key,
value string
);
```
The `ts` field is just a placeholder, required by the constraint that a mito region must contain a time index field; it will always be `0`. The other two fields, `key` and `value`, act as a k-v storage. It contains two groups of keys:
- `__table_<TABLE_NAME>` marks a table's existence. It has no value.
- `__column_<TABLE_NAME>_<COLUMN_NAME>` marks a column's existence; the value is the column's semantic type.
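A minimal sketch of this key encoding; the helper names are illustrative, only the `__table_...` and `__column_...` formats come from this section:
```rust
// A minimal sketch of the metadata keys described above.
fn table_key(table: &str) -> String {
    format!("__table_{table}")
}

fn column_key(table: &str, column: &str) -> String {
    format!("__column_{table}_{column}")
}

fn main() {
    // Mark that logical table `cpu` exists (no value needed).
    assert_eq!(table_key("cpu"), "__table_cpu");
    // Mark that column `host` of table `cpu` exists; the value stored under
    // this key would be the column's semantic type, e.g. "tag".
    assert_eq!(column_key("cpu", "host"), "__column_cpu_host");
}
```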
## Physical region implementation
This RFC proposes adding a new region implementation named "MetricRegion". As shown in the first chart, it wraps the existing mito region. This section describes the implementation details. First, here is a chart showing how the region hierarchy looks:
```plaintext
┌───────────────────────┐
│ Metric Region │
│ │
│ ┌────────┬──────────┤
│ │ Mito │ Mito │
│ │ Region │ Region │
│ │ for │ for │
│ │ Data │ Metadata │
└───┴────────┴──────────┘
```
All upper levels only see the Metric Region. E.g., the Meta Server schedules on this region, and the Frontend routes requests to this Metric Region's id. To be scheduled (opened, closed, etc.), the Metric Region needs to implement its own procedures. Most of those procedures can simply be assembled from the underlying Mito Regions', but those related to data, like alter or drop, will have their own new logic.
Another point is the region id. Since the region id is used widely, from the meta server to persisted state, it's better to keep its format unchanged. This means we can't use the same id for two regions; each needs its own. To achieve this, this RFC proposes a concept named "region id group": a group of region ids that are bound together for different purposes, like the two underlying regions here.
This reserves the first (highest) 8 bits of the `u32` region number for grouping. Each group has one main id (the first one) and other sub ids (the rest, non-zero ids). Components other than the region implementation itself are not aware of the region id group; they only see the main id. The region implementation is responsible for managing and using the region id group.
```plaintext
63 31 23 0
┌────────────────────────────────────┬──────────┬──────────────────┐
│ Table Id(32) │ Group(8) │ Region Number(24)│
└────────────────────────────────────┴──────────┴──────────────────┘
Region Id(32)
```
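A minimal sketch of that layout, with illustrative helper names rather than GreptimeDB's actual region id API:
```rust
// A minimal sketch of the bit layout above: the low 32 bits are the region
// number, whose top 8 bits act as the group; the high 32 bits are the table id.
fn make_region_id(table_id: u32, group: u8, region_number: u32) -> u64 {
    assert!(region_number < (1 << 24), "region number must fit in 24 bits");
    ((table_id as u64) << 32) | ((group as u64) << 24) | (region_number as u64)
}

fn group_of(region_id: u64) -> u8 {
    ((region_id >> 24) & 0xff) as u8
}

/// The main id of a group is the one whose group bits are zero; components
/// other than the region implementation only ever see this id.
fn main_id(region_id: u64) -> u64 {
    region_id & !(0xffu64 << 24)
}

fn main() {
    let data_region = make_region_id(42, 0, 1); // main id, used for the data region
    let meta_region = make_region_id(42, 1, 1); // sub id, used for the metadata region
    assert_eq!(group_of(meta_region), 1);
    assert_eq!(main_id(meta_region), data_region);
}
```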
## Routing in meta server
From previous sections, we can conclude the following points about routing:
- Each "logical table" has its own, universe unique table id.
- Logical table doesn't have physical region, they share the same physical region with other logical tables.
- Route rule of logical table's is a strict subset of physical table's.
To associate the logical table with physical region, we need to specify necessary information in the create table request. Specifically, the table type and its parent table. This require to change our gRPC proto's definition. And once meta recognize the table to create is a logical table, it will use the parent table's region to create route entry.
And to reduce the cost of region failover (which needs to update the physical table's route info), we'd better split the current route table structure into two parts:
```rust
region_route: Map<TableName, [RegionId]>,
node_route: Map<RegionId, NodeId>,
```
By doing this, on each failover the meta server only needs to update the second `node_route` map, leaving the first one untouched.
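A minimal sketch of this split, using `HashMap` and illustrative id types, to show that a failover only rewrites `node_route`:
```rust
// A minimal sketch, assuming plain integer ids and in-memory maps.
use std::collections::HashMap;

type RegionId = u64;
type NodeId = u64;

struct RouteTable {
    region_route: HashMap<String, Vec<RegionId>>,
    node_route: HashMap<RegionId, NodeId>,
}

impl RouteTable {
    /// Moves every region hosted by `from` to `to`; `region_route` is untouched.
    fn failover(&mut self, from: NodeId, to: NodeId) {
        for node in self.node_route.values_mut() {
            if *node == from {
                *node = to;
            }
        }
    }
}

fn main() {
    let mut routes = RouteTable {
        region_route: HashMap::from([("cpu".to_string(), vec![1, 2])]),
        node_route: HashMap::from([(1, 100), (2, 101)]),
    };
    routes.failover(100, 102);
    assert_eq!(routes.node_route[&1], 102);
    assert_eq!(routes.region_route["cpu"], vec![1u64, 2]); // unchanged
}
```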
## Query
As with other existing components, a user query always starts in the frontend. In the planning phase, the frontend needs to fetch the schemas related to the queried table. This part stays the same, i.e., the changes in this RFC don't affect components above the `Table` abstraction.
# Alternatives
## Other routing method
We could also implement this "special" route rule in the meta server, but there is no essential difference from the proposed method.
## Other storage for physical region's metadata
Once we have implemented the "region family" that allows multiple physical schemas to exist in one region, we can store the metadata and table data in one region.
Before that, we could also let the `MetricRegion` hold a `KvBackend` to access the storage layer directly, but this breaks the abstraction to some degree.
# Drawbacks
Since the physical storage is mixed together, it's hard to perform fine-grained operations at the table level, like configuring TTL, memtable size, or compaction strategy per table, or defining different partition rules for different tables. For scenarios like this, it's better to move the table out of the metric engine and "upgrade" it to a normal mito engine table. This requires a low-cost migration process, and we have to ensure data consistency during the migration, which may require an out-of-service period.

View File

@@ -0,0 +1,175 @@
---
Feature Name: table-trait-refactor
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/2065
Date: 2023-08-04
Author: "Ruihang Xia <waynestxia@gmail.com>"
---
Refactor Table Trait
--------------------
# Summary
Refactor `Table` trait to adapt the new region server architecture and make code more straightforward.
# Motivation
The `Table` trait was designed with the assumption that frontend and datanode share the same concepts, and that all operations are served by a `Table`. However, in practice we found that not all operations are suitable to be served by a `Table`. For example, the `Table` doesn't hold actual physical data itself, so operations like write or alter are simply proxies over the underlying regions. And in the recent refactor of the datanode ([rfc table-engine-refactor](./2023-07-06-table-engine-refactor.md)), we are changing the datanode into a region server that is only aware of `Region`-level concepts. This also calls for a refactor of the `Table` trait.
# Details
## Definitions
The current `Table` trait contains the following methods:
```rust
pub trait Table {
/// Get a reference to the schema for this table
fn schema(&self) -> SchemaRef;
/// Get a reference to the table info.
fn table_info(&self) -> TableInfoRef;
/// Get the type of this table for metadata/catalog purposes.
fn table_type(&self) -> TableType;
/// Insert values into table.
///
/// Returns number of inserted rows.
async fn insert(&self, _request: InsertRequest) -> Result<usize>;
/// Generate a record batch stream for querying.
async fn scan_to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream>;
/// Tests whether the table provider can make use of any or all filter expressions
/// to optimise data retrieval.
fn supports_filters_pushdown(&self, filters: &[&Expr]) -> Result<Vec<FilterPushDownType>>;
/// Alter table.
async fn alter(&self, _context: AlterContext, _request: &AlterTableRequest) -> Result<()>;
/// Delete rows in the table.
///
/// Returns number of deleted rows.
async fn delete(&self, _request: DeleteRequest) -> Result<usize>;
/// Flush table.
///
/// Options:
/// - region_number: specify region to flush.
/// - wait: Whether to wait until flush is done.
async fn flush(&self, region_number: Option<RegionNumber>, wait: Option<bool>) -> Result<()>;
/// Close the table.
async fn close(&self, _regions: &[RegionNumber]) -> Result<()>;
/// Get region stats in this table.
fn region_stats(&self) -> Result<Vec<RegionStat>>;
/// Return true if contains the region
fn contains_region(&self, _region: RegionNumber) -> Result<bool>;
/// Get statistics for this table, if available
fn statistics(&self) -> Option<TableStatistics>;
async fn compact(&self, region_number: Option<RegionNumber>, wait: Option<bool>) -> Result<()>;
}
```
We can divide those methods into three categories from the perspective of functionality:
| Retrieve Metadata | Manipulate Data | Read Data |
| :------------------------: | :-------------: | :--------------: |
| `schema` | `insert` | `scan_to_stream` |
| `table_info` | `alter` | |
| `table_type` | `delete` | |
| `supports_filter_pushdown` | `flush` | |
| `region_stats` | `close` | |
| `contains_region` | `compact` | |
| `statistics` | | |
Considering that most metadata access happens in the frontend (e.g., for routing or query planning), that all persisted data is stored in regions, and that only the query engine needs to read data, we can divide the `Table` trait into three concepts:
- struct `Table` provides metadata:
```rust
impl Table {
/// Get a reference to the schema for this table
fn schema(&self) -> SchemaRef;
/// Get a reference to the table info.
fn table_info(&self) -> TableInfoRef;
/// Get the type of this table for metadata/catalog purposes.
fn table_type(&self) -> TableType;
/// Get statistics for this table, if available
fn statistics(&self) -> Option<TableStatistics>;
fn to_data_source(&self) -> DataSourceRef;
}
```
- Requests to region server
- `InsertRequest`
- `AlterRequest`
- `DeleteRequest`
- `FlushRequest`
- `CompactRequest`
- `CloseRequest`
- trait `DataSource` provides data (`RecordBatch`)
```rust
trait DataSource {
fn get_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream>;
}
```
## Use `Table`
`Table` will only be used in the frontend. It's constructed from an `OpenTableRequest` or a `CreateTableRequest`.
`Table` also provides a method `to_data_source` to generate a `DataSource` from itself. But this method is only for non-`TableType::Base` tables (i.e., `TableType::View` and `TableType::Temporary`), because a `TableType::Base` table doesn't hold actual data itself; its `DataSource` should be constructed from the `Region` directly (in other words, it's a remote query).
Constructing a `DataSource` requires some extra information, named `TableSourceProvider`:
```rust
type TableFactory = Arc<dyn Fn() -> DataSourceRef>;
pub enum TableSourceProvider {
Base,
View(LogicalPlan),
Temporary(TableFactory),
}
```
## Use `DataSource`
`DataSource` will be adapted to the `TableProvider` from DataFusion that can be `scan()`ed in a `TableScan` plan.
In the frontend this is done in the planning phase, and the datanode will have one implementation for `Region` to generate the record batch stream.
## Interact with RegionServer
Previously, persisted state changes went through the old `Table` trait, as described before. Now they will go directly from the action source, such as a procedure or protocol handler, to the region server. E.g., on alter table, the corresponding procedure will generate its `AlterRequest` and send it to the regions; a write request will be split in the frontend handler and sent to the regions. `Table` only provides metadata, like route information, if needed, and is no longer a required part of the path.
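As a rough sketch of this flow, the snippet below uses illustrative stand-in types (`RegionRequest` and `RegionServer` are not the real definitions): the procedure builds the request itself and fans it out to the table's regions, without going through `Table`.
```rust
// A minimal sketch, assuming stand-in types for the region server interface.
use std::collections::HashMap;

type RegionId = u64;

/// Other request kinds (write, flush, drop, ...) are omitted in this sketch.
enum RegionRequest {
    Alter { add_column: String },
}

trait RegionServer {
    fn handle(&mut self, region: RegionId, request: RegionRequest);
}

struct MockRegionServer {
    handled: HashMap<RegionId, usize>,
}

impl RegionServer for MockRegionServer {
    fn handle(&mut self, region: RegionId, _request: RegionRequest) {
        *self.handled.entry(region).or_insert(0) += 1;
    }
}

/// An "alter table" procedure: it sends an `AlterRequest`-like message to every
/// region of the table, using route metadata obtained elsewhere.
fn alter_table(server: &mut dyn RegionServer, regions: &[RegionId], column: &str) {
    for &region in regions {
        server.handle(
            region,
            RegionRequest::Alter {
                add_column: column.to_string(),
            },
        );
    }
}

fn main() {
    let mut server = MockRegionServer {
        handled: HashMap::new(),
    };
    alter_table(&mut server, &[1, 2, 3], "new_col");
    assert_eq!(server.handled.len(), 3);
}
```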
## Implement temporary table
A temporary table is a special table that isn't backed by any persistent physical region. Examples are:
- the `Numbers` table for testing, which produces a record batch that contains 0-100 integers.
- tables in the information schema, which are an interface for querying the catalog's metadata. Their contents are generated on the fly with information from `CatalogManager`, which can be held in the `TableFactory`.
- a function table that produces data generated by a formula or a function, like something that always returns `sin(current_timestamp())` (see the sketch after this list).
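Below is a minimal, self-contained sketch of such a function table. `DataSource`, `ScanRequest`, and the value type are simplified stand-ins for the traits in this RFC, only to show that the data is generated on the fly at scan time rather than read from a region.
```rust
// A minimal sketch, assuming simplified stand-in types for `DataSource` and
// `ScanRequest`; the real trait returns a `SendableRecordBatchStream`.
use std::time::{SystemTime, UNIX_EPOCH};

struct ScanRequest {
    limit: Option<usize>,
}

trait DataSource {
    fn get_stream(&self, request: ScanRequest) -> Box<dyn Iterator<Item = f64>>;
}

/// A "function table" that produces `sin(current_timestamp())`-style values.
struct SineClockTable;

impl DataSource for SineClockTable {
    fn get_stream(&self, request: ScanRequest) -> Box<dyn Iterator<Item = f64>> {
        // The "table data" is computed from the wall clock at scan time.
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("clock before UNIX epoch")
            .as_secs_f64();
        let limit = request.limit.unwrap_or(1);
        Box::new((0..limit).map(move |i| (now + i as f64).sin()))
    }
}

fn main() {
    let table = SineClockTable;
    let values: Vec<f64> = table.get_stream(ScanRequest { limit: Some(3) }).collect();
    assert_eq!(values.len(), 3);
    println!("{values:?}");
}
```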
## Relationship among those components
Here is a diagram to show the relationship among those components, and how they interact with each other.
```mermaid
erDiagram
CatalogManager ||--|{ Table : manages
Table ||--|{ DataStream : generates
Table ||--|{ Region : routes
Region ||--|{ DataStream : implements
DataStream }|..|| QueryEngine : adapts-to
Procedure ||--|{ Region : requests
Protocol ||--|{ Region : writes
Protocol ||--|{ QueryEngine : queries
```
# Drawback
This is a breaking change.

View File

@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2023-05-03"
channel = "nightly-2023-08-07"

View File

@@ -2,14 +2,14 @@
# This script is used to download built dashboard assets from the "GreptimeTeam/dashboard" repository.
set -e
set -e -x
declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd)
declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR})
declare -r STATIC_DIR="$ROOT_DIR/src/servers/dashboard"
OUT_DIR="${1:-$SCRIPT_DIR}"
RELEASE_VERSION="$(cat $STATIC_DIR/VERSION)"
RELEASE_VERSION="$(cat $STATIC_DIR/VERSION | tr -d '\t\r\n ')"
echo "Downloading assets to dir: $OUT_DIR"
cd $OUT_DIR

View File

@@ -61,7 +61,16 @@ if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
fi
echo "Downloading ${BIN}, OS: ${OS_TYPE}, Arch: ${ARCH_TYPE}, Version: ${VERSION}"
PACKAGE_NAME="${BIN}-${OS_TYPE}-${ARCH_TYPE}-${VERSION}.tar.gz"
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run './${BIN} --help' to get started"
if [ -n "${PACKAGE_NAME}" ]; then
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${PACKAGE_NAME}"
# Extract the binary and clean the rest.
tar xvf "${PACKAGE_NAME}" && \
mv "${PACKAGE_NAME%.tar.gz}/${BIN}" "${PWD}" && \
rm -r "${PACKAGE_NAME}" && \
rm -r "${PACKAGE_NAME%.tar.gz}" && \
echo "Run './${BIN} --help' to get started"
fi
fi

View File

@@ -5,11 +5,10 @@ edition.workspace = true
license.workspace = true
[dependencies]
arrow-flight.workspace = true
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
common-base = { workspace = true }
common-error = { workspace = true }
common-time = { workspace = true }
datatypes = { workspace = true }
greptime-proto.workspace = true
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }

View File

@@ -13,15 +13,17 @@
// limitations under the License.
use common_base::BitVec;
use common_time::interval::IntervalUnit;
use common_time::timestamp::TimeUnit;
use common_time::Interval;
use datatypes::prelude::ConcreteDataType;
use datatypes::types::{TimeType, TimestampType};
use datatypes::types::{IntervalType, TimeType, TimestampType};
use datatypes::value::Value;
use datatypes::vectors::VectorRef;
use greptime_proto::v1::ddl_request::Expr;
use greptime_proto::v1::greptime_request::Request;
use greptime_proto::v1::query_request::Query;
use greptime_proto::v1::{DdlRequest, QueryRequest};
use greptime_proto::v1::{DdlRequest, IntervalMonthDayNano, QueryRequest};
use snafu::prelude::*;
use crate::error::{self, Result};
@@ -75,6 +77,11 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
ColumnDataType::TimeMillisecond => ConcreteDataType::time_millisecond_datatype(),
ColumnDataType::TimeMicrosecond => ConcreteDataType::time_microsecond_datatype(),
ColumnDataType::TimeNanosecond => ConcreteDataType::time_nanosecond_datatype(),
ColumnDataType::IntervalYearMonth => ConcreteDataType::interval_year_month_datatype(),
ColumnDataType::IntervalDayTime => ConcreteDataType::interval_day_time_datatype(),
ColumnDataType::IntervalMonthDayNano => {
ConcreteDataType::interval_month_day_nano_datatype()
}
}
}
}
@@ -111,6 +118,11 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
TimeType::Microsecond(_) => ColumnDataType::TimeMicrosecond,
TimeType::Nanosecond(_) => ColumnDataType::TimeNanosecond,
},
ConcreteDataType::Interval(i) => match i {
IntervalType::YearMonth(_) => ColumnDataType::IntervalYearMonth,
IntervalType::DayTime(_) => ColumnDataType::IntervalDayTime,
IntervalType::MonthDayNano(_) => ColumnDataType::IntervalMonthDayNano,
},
ConcreteDataType::Null(_)
| ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_) => {
@@ -215,6 +227,18 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
time_nanosecond_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::IntervalDayTime => Values {
interval_day_time_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::IntervalYearMonth => Values {
interval_year_month_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::IntervalMonthDayNano => Values {
interval_month_day_nano_values: Vec::with_capacity(capacity),
..Default::default()
},
}
}
@@ -255,6 +279,13 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
TimeUnit::Microsecond => values.time_microsecond_values.push(val.value()),
TimeUnit::Nanosecond => values.time_nanosecond_values.push(val.value()),
},
Value::Interval(val) => match val.unit() {
IntervalUnit::YearMonth => values.interval_year_month_values.push(val.to_i32()),
IntervalUnit::DayTime => values.interval_day_time_values.push(val.to_i64()),
IntervalUnit::MonthDayNano => values
.interval_month_day_nano_values
.push(convert_i128_to_interval(val.to_i128())),
},
Value::List(_) => unreachable!(),
});
column.null_mask = null_mask.into_vec();
@@ -294,14 +325,26 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
}
}
/// Converts an i128 value to google protobuf type [IntervalMonthDayNano].
pub fn convert_i128_to_interval(v: i128) -> IntervalMonthDayNano {
let interval = Interval::from_i128(v);
let (months, days, nanoseconds) = interval.to_month_day_nano();
IntervalMonthDayNano {
months,
days,
nanoseconds,
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use datatypes::vectors::{
BooleanVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector,
TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector,
TimestampNanosecondVector, TimestampSecondVector,
BooleanVector, IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector,
TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector,
TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector,
TimestampSecondVector, Vector,
};
use super::*;
@@ -367,6 +410,14 @@ mod tests {
let values = values_with_capacity(ColumnDataType::TimeMillisecond, 2);
let values = values.time_millisecond_values;
assert_eq!(2, values.capacity());
let values = values_with_capacity(ColumnDataType::IntervalDayTime, 2);
let values = values.interval_day_time_values;
assert_eq!(2, values.capacity());
let values = values_with_capacity(ColumnDataType::IntervalMonthDayNano, 2);
let values = values.interval_month_day_nano_values;
assert_eq!(2, values.capacity());
}
#[test]
@@ -439,6 +490,18 @@ mod tests {
ConcreteDataType::time_datatype(TimeUnit::Millisecond),
ColumnDataTypeWrapper(ColumnDataType::TimeMillisecond).into()
);
assert_eq!(
ConcreteDataType::interval_datatype(IntervalUnit::DayTime),
ColumnDataTypeWrapper(ColumnDataType::IntervalDayTime).into()
);
assert_eq!(
ConcreteDataType::interval_datatype(IntervalUnit::YearMonth),
ColumnDataTypeWrapper(ColumnDataType::IntervalYearMonth).into()
);
assert_eq!(
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
ColumnDataTypeWrapper(ColumnDataType::IntervalMonthDayNano).into()
);
}
#[test]
@@ -509,6 +572,24 @@ mod tests {
.try_into()
.unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::IntervalYearMonth),
ConcreteDataType::interval_datatype(IntervalUnit::YearMonth)
.try_into()
.unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::IntervalDayTime),
ConcreteDataType::interval_datatype(IntervalUnit::DayTime)
.try_into()
.unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::IntervalMonthDayNano),
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano)
.try_into()
.unwrap()
);
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
assert!(result.is_err());
@@ -608,9 +689,53 @@ mod tests {
);
}
#[test]
fn test_column_put_interval_values() {
let mut column = Column {
column_name: "test".to_string(),
semantic_type: 0,
values: Some(Values {
..Default::default()
}),
null_mask: vec![],
datatype: 0,
};
let vector = Arc::new(IntervalYearMonthVector::from_vec(vec![1, 2, 3]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![1, 2, 3],
column.values.as_ref().unwrap().interval_year_month_values
);
let vector = Arc::new(IntervalDayTimeVector::from_vec(vec![4, 5, 6]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![4, 5, 6],
column.values.as_ref().unwrap().interval_day_time_values
);
let vector = Arc::new(IntervalMonthDayNanoVector::from_vec(vec![7, 8, 9]));
let len = vector.len();
push_vals(&mut column, 3, vector);
(0..len).for_each(|i| {
assert_eq!(
7 + i as i64,
column
.values
.as_ref()
.unwrap()
.interval_month_day_nano_values
.get(i)
.unwrap()
.nanoseconds
);
});
}
#[test]
fn test_column_put_vector() {
use crate::v1::column::SemanticType;
use crate::v1::SemanticType;
// Some(false), None, Some(true), Some(true)
let mut column = Column {
column_name: "test".to_string(),
@@ -632,4 +757,13 @@ mod tests {
let null_mask = column.null_mask;
assert_eq!(34, null_mask[0]);
}
#[test]
fn test_convert_i128_to_interval() {
let i128_val = 3000;
let interval = convert_i128_to_interval(i128_val);
assert_eq!(interval.months, 0);
assert_eq!(interval.days, 0);
assert_eq!(interval.nanoseconds, 3000);
}
}

View File

@@ -8,48 +8,45 @@ license.workspace = true
testing = []
[dependencies]
api = { path = "../api" }
api = { workspace = true }
arc-swap = "1.0"
arrow-schema.workspace = true
async-stream.workspace = true
async-trait = "0.1"
backoff = { version = "0.4", features = ["tokio"] }
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
common-meta = { path = "../common/meta" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
common-catalog = { workspace = true }
common-error = { workspace = true }
common-grpc = { workspace = true }
common-meta = { workspace = true }
common-query = { workspace = true }
common-recordbatch = { workspace = true }
common-runtime = { workspace = true }
common-telemetry = { workspace = true }
common-time = { workspace = true }
dashmap = "5.4"
datafusion.workspace = true
datatypes = { path = "../datatypes" }
datatypes = { workspace = true }
futures = "0.3"
futures-util.workspace = true
key-lock = "0.1"
lazy_static.workspace = true
meta-client = { path = "../meta-client" }
meta-client = { workspace = true }
metrics.workspace = true
moka = { version = "0.11", features = ["future"] }
parking_lot = "0.12"
regex.workspace = true
serde = "1.0"
serde.workspace = true
serde_json = "1.0"
session = { path = "../session" }
session = { workspace = true }
snafu = { version = "0.7", features = ["backtraces"] }
storage = { path = "../storage" }
store-api = { path = "../store-api" }
table = { path = "../table" }
store-api = { workspace = true }
table = { workspace = true }
tokio.workspace = true
[dev-dependencies]
catalog = { path = ".", features = ["testing"] }
common-test-util = { path = "../common/test-util" }
catalog = { workspace = true, features = ["testing"] }
chrono.workspace = true
log-store = { path = "../log-store" }
mito = { path = "../mito", features = ["test"] }
object-store = { path = "../object-store" }
storage = { path = "../storage" }
common-test-util = { workspace = true }
log-store = { workspace = true }
mito = { workspace = true, features = ["test"] }
object-store = { workspace = true }
storage = { workspace = true }
tokio.workspace = true

View File

@@ -16,26 +16,33 @@ mod columns;
mod tables;
use std::any::Any;
use std::collections::HashMap;
use std::sync::{Arc, Weak};
use async_trait::async_trait;
use common_catalog::consts::{
INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME,
INFORMATION_SCHEMA_TABLES_TABLE_ID,
};
use common_error::ext::BoxedError;
use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
use datatypes::schema::SchemaRef;
use futures_util::StreamExt;
use snafu::ResultExt;
use store_api::storage::ScanRequest;
use store_api::storage::{ScanRequest, TableId};
use table::data_source::DataSource;
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
use table::metadata::TableType;
use table::metadata::{TableIdent, TableInfoBuilder, TableMetaBuilder, TableType};
use table::{Result as TableResult, Table, TableRef};
use self::columns::InformationSchemaColumns;
use crate::error::Result;
use crate::information_schema::tables::InformationSchemaTables;
use crate::table_factory::TableFactory;
use crate::CatalogManager;
const TABLES: &str = "tables";
const COLUMNS: &str = "columns";
pub const TABLES: &str = "tables";
pub const COLUMNS: &str = "columns";
pub struct InformationSchemaProvider {
catalog_name: String,
@@ -49,25 +56,97 @@ impl InformationSchemaProvider {
catalog_manager,
}
}
}
impl InformationSchemaProvider {
/// Build a map of [TableRef] in information schema.
/// Including `tables` and `columns`.
pub fn build(
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
) -> HashMap<String, TableRef> {
let mut schema = HashMap::new();
schema.insert(
TABLES.to_string(),
Arc::new(InformationTable::new(
catalog_name.clone(),
INFORMATION_SCHEMA_TABLES_TABLE_ID,
TABLES.to_string(),
Arc::new(InformationSchemaTables::new(
catalog_name.clone(),
catalog_manager.clone(),
)),
)) as _,
);
schema.insert(
COLUMNS.to_string(),
Arc::new(InformationTable::new(
catalog_name.clone(),
INFORMATION_SCHEMA_COLUMNS_TABLE_ID,
COLUMNS.to_string(),
Arc::new(InformationSchemaColumns::new(catalog_name, catalog_manager)),
)) as _,
);
schema
}
pub fn table(&self, name: &str) -> Result<Option<TableRef>> {
let stream_builder = match name.to_ascii_lowercase().as_ref() {
TABLES => Arc::new(InformationSchemaTables::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _,
COLUMNS => Arc::new(InformationSchemaColumns::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _,
let (stream_builder, table_id) = match name.to_ascii_lowercase().as_ref() {
TABLES => (
Arc::new(InformationSchemaTables::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _,
INFORMATION_SCHEMA_TABLES_TABLE_ID,
),
COLUMNS => (
Arc::new(InformationSchemaColumns::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _,
INFORMATION_SCHEMA_COLUMNS_TABLE_ID,
),
_ => {
return Ok(None);
}
};
Ok(Some(Arc::new(InformationTable::new(stream_builder))))
Ok(Some(Arc::new(InformationTable::new(
self.catalog_name.clone(),
table_id,
name.to_string(),
stream_builder,
))))
}
pub fn table_factory(&self, name: &str) -> Result<Option<TableFactory>> {
let (stream_builder, table_id) = match name.to_ascii_lowercase().as_ref() {
TABLES => (
Arc::new(InformationSchemaTables::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _,
INFORMATION_SCHEMA_TABLES_TABLE_ID,
),
COLUMNS => (
Arc::new(InformationSchemaColumns::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _,
INFORMATION_SCHEMA_COLUMNS_TABLE_ID,
),
_ => {
return Ok(None);
}
};
let data_source = Arc::new(InformationTable::new(
self.catalog_name.clone(),
table_id,
name.to_string(),
stream_builder,
));
Ok(Some(Arc::new(move || data_source.clone())))
}
}
@@ -80,12 +159,25 @@ pub trait InformationStreamBuilder: Send + Sync {
}
pub struct InformationTable {
catalog_name: String,
table_id: TableId,
name: String,
stream_builder: Arc<dyn InformationStreamBuilder>,
}
impl InformationTable {
pub fn new(stream_builder: Arc<dyn InformationStreamBuilder>) -> Self {
Self { stream_builder }
pub fn new(
catalog_name: String,
table_id: TableId,
name: String,
stream_builder: Arc<dyn InformationStreamBuilder>,
) -> Self {
Self {
catalog_name,
table_id,
name,
stream_builder,
}
}
}
@@ -100,14 +192,39 @@ impl Table for InformationTable {
}
fn table_info(&self) -> table::metadata::TableInfoRef {
unreachable!("Should not call table_info() of InformationTable directly")
let table_meta = TableMetaBuilder::default()
.schema(self.stream_builder.schema())
.primary_key_indices(vec![])
.next_column_id(0)
.build()
.unwrap();
Arc::new(
TableInfoBuilder::default()
.ident(TableIdent {
table_id: self.table_id,
version: 0,
})
.name(self.name.clone())
.catalog_name(self.catalog_name.clone())
.schema_name(INFORMATION_SCHEMA_NAME.to_string())
.meta(table_meta)
.table_type(TableType::Temporary)
.build()
.unwrap(),
)
}
fn table_type(&self) -> table::metadata::TableType {
TableType::View
fn table_type(&self) -> TableType {
TableType::Temporary
}
async fn scan_to_stream(&self, request: ScanRequest) -> TableResult<SendableRecordBatchStream> {
self.get_stream(request)
}
}
impl DataSource for InformationTable {
fn get_stream(&self, request: ScanRequest) -> TableResult<SendableRecordBatchStream> {
let projection = request.projection;
let projected_schema = if let Some(projection) = &projection {
Arc::new(

View File

@@ -16,14 +16,15 @@ use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::{
SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_PRIMARY_KEY, SEMANTIC_TYPE_TIME_INDEX,
INFORMATION_SCHEMA_NAME, SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_PRIMARY_KEY,
SEMANTIC_TYPE_TIME_INDEX,
};
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::datasource::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, DataType};
use datatypes::scalars::ScalarVectorBuilder;
@@ -31,7 +32,8 @@ use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{StringVectorBuilder, VectorRef};
use snafu::{OptionExt, ResultExt};
use super::InformationStreamBuilder;
use super::tables::InformationSchemaTables;
use super::{InformationStreamBuilder, COLUMNS, TABLES};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
@@ -52,19 +54,22 @@ const SEMANTIC_TYPE: &str = "semantic_type";
impl InformationSchemaColumns {
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
let schema = Arc::new(Schema::new(vec![
Self {
schema: Self::schema(),
catalog_name,
catalog_manager,
}
}
fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(COLUMN_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(DATA_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(SEMANTIC_TYPE, ConcreteDataType::string_datatype(), false),
]));
Self {
schema,
catalog_name,
catalog_manager,
}
]))
}
fn builder(&self) -> InformationSchemaColumnsBuilder {
@@ -153,9 +158,28 @@ impl InformationSchemaColumnsBuilder {
.table_names(&catalog_name, &schema_name)
.await?
{
let Some(table) = catalog_manager.table(&catalog_name, &schema_name, &table_name).await? else { continue };
let keys = &table.table_info().meta.primary_key_indices;
let schema = table.schema();
let (keys, schema) = if let Some(table) = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.await?
{
let keys = &table.table_info().meta.primary_key_indices;
let schema = table.schema();
(keys.clone(), schema)
} else {
// TODO: this specific branch is only a workaround for FrontendCatalogManager.
if schema_name == INFORMATION_SCHEMA_NAME {
if table_name == COLUMNS {
(vec![], InformationSchemaColumns::schema())
} else if table_name == TABLES {
(vec![], InformationSchemaTables::schema())
} else {
continue;
}
} else {
continue;
}
};
for (idx, column) in schema.column_schemas().iter().enumerate() {
let semantic_type = if column.is_time_index() {
SEMANTIC_TYPE_TIME_INDEX

View File

@@ -15,13 +15,16 @@
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_NAME;
use common_catalog::consts::{
INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME,
INFORMATION_SCHEMA_TABLES_TABLE_ID,
};
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::datasource::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
@@ -29,6 +32,7 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder};
use snafu::{OptionExt, ResultExt};
use table::metadata::TableType;
use super::{COLUMNS, TABLES};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
@@ -43,19 +47,22 @@ pub(super) struct InformationSchemaTables {
impl InformationSchemaTables {
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
let schema = Arc::new(Schema::new(vec![
Self {
schema: Self::schema(),
catalog_name,
catalog_manager,
}
}
pub(crate) fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
ColumnSchema::new("table_catalog", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_schema", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_name", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_type", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_id", ConcreteDataType::uint32_datatype(), true),
ColumnSchema::new("engine", ConcreteDataType::string_datatype(), true),
]));
Self {
schema,
catalog_name,
catalog_manager,
}
]))
}
fn builder(&self) -> InformationSchemaTablesBuilder {
@@ -137,9 +144,6 @@ impl InformationSchemaTablesBuilder {
.context(UpgradeWeakCatalogManagerRefSnafu)?;
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
if schema_name == INFORMATION_SCHEMA_NAME {
continue;
}
if !catalog_manager
.schema_exist(&catalog_name, &schema_name)
.await?
@@ -151,16 +155,43 @@ impl InformationSchemaTablesBuilder {
.table_names(&catalog_name, &schema_name)
.await?
{
let Some(table) = catalog_manager.table(&catalog_name, &schema_name, &table_name).await? else { continue };
let table_info = table.table_info();
self.add_table(
&catalog_name,
&schema_name,
&table_name,
table.table_type(),
Some(table_info.ident.table_id),
Some(&table_info.meta.engine),
);
if let Some(table) = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.await?
{
let table_info = table.table_info();
self.add_table(
&catalog_name,
&schema_name,
&table_name,
table.table_type(),
Some(table_info.ident.table_id),
Some(&table_info.meta.engine),
);
} else {
// TODO: this specific branch is only a workaround for FrontendCatalogManager.
if schema_name == INFORMATION_SCHEMA_NAME {
if table_name == COLUMNS {
self.add_table(
&catalog_name,
&schema_name,
&table_name,
TableType::Temporary,
Some(INFORMATION_SCHEMA_COLUMNS_TABLE_ID),
None,
);
} else if table_name == TABLES {
self.add_table(
&catalog_name,
&schema_name,
&table_name,
TableType::Temporary,
Some(INFORMATION_SCHEMA_TABLES_TABLE_ID),
None,
);
}
}
};
}
}

View File

@@ -37,6 +37,7 @@ pub mod local;
mod metrics;
pub mod remote;
pub mod system;
pub mod table_factory;
pub mod table_source;
pub mod tables;
@@ -48,7 +49,7 @@ pub trait CatalogManager: Send + Sync {
async fn start(&self) -> Result<()>;
/// Registers a catalog to catalog manager, returns whether the catalog exist before.
async fn register_catalog(&self, name: String) -> Result<bool>;
async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool>;
/// Register a schema with catalog name and schema name. Retuens whether the
/// schema registered.
@@ -217,13 +218,27 @@ pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> (u64, Vec<Reg
let mut region_number: u64 = 0;
let mut region_stats = Vec::new();
let Ok(catalog_names) = catalog_manager.catalog_names().await else { return (region_number, region_stats) };
let Ok(catalog_names) = catalog_manager.catalog_names().await else {
return (region_number, region_stats);
};
for catalog_name in catalog_names {
let Ok(schema_names) = catalog_manager.schema_names(&catalog_name).await else { continue };
let Ok(schema_names) = catalog_manager.schema_names(&catalog_name).await else {
continue;
};
for schema_name in schema_names {
let Ok(table_names) = catalog_manager.table_names(&catalog_name,&schema_name).await else { continue };
let Ok(table_names) = catalog_manager
.table_names(&catalog_name, &schema_name)
.await
else {
continue;
};
for table_name in table_names {
let Ok(Some(table)) = catalog_manager.table(&catalog_name, &schema_name, &table_name).await else { continue };
let Ok(Some(table)) = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.await
else {
continue;
};
let table_info = table.table_info();
let region_numbers = &table_info.meta.region_numbers;

View File

@@ -43,7 +43,6 @@ use crate::error::{
SystemCatalogTypeMismatchSnafu, TableEngineNotFoundSnafu, TableExistsSnafu, TableNotExistSnafu,
TableNotFoundSnafu, UnimplementedSnafu,
};
use crate::information_schema::InformationSchemaProvider;
use crate::local::memory::MemoryCatalogManager;
use crate::system::{
decode_system_catalog, Entry, SystemCatalogTable, TableEntry, ENTRY_TYPE_INDEX, KEY_INDEX,
@@ -51,9 +50,8 @@ use crate::system::{
};
use crate::tables::SystemCatalog;
use crate::{
handle_system_table_request, CatalogManager, CatalogManagerRef, DeregisterSchemaRequest,
DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
RegisterTableRequest, RenameTableRequest,
handle_system_table_request, CatalogManager, DeregisterSchemaRequest, DeregisterTableRequest,
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
};
/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
@@ -118,11 +116,18 @@ impl LocalCatalogManager {
}
async fn init_system_catalog(&self) -> Result<()> {
// register default catalog and default schema
self.catalogs
.register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())?;
self.catalogs.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
})?;
// register SystemCatalogTable
let _ = self
.catalogs
self.catalogs
.register_catalog_sync(SYSTEM_CATALOG_NAME.to_string())?;
let _ = self.catalogs.register_schema_sync(RegisterSchemaRequest {
self.catalogs.register_schema_sync(RegisterSchemaRequest {
catalog: SYSTEM_CATALOG_NAME.to_string(),
schema: INFORMATION_SCHEMA_NAME.to_string(),
})?;
@@ -133,16 +138,7 @@ impl LocalCatalogManager {
table_id: SYSTEM_CATALOG_TABLE_ID,
table: self.system.information_schema.system.clone(),
};
let _ = self.catalogs.register_table(register_table_req).await?;
// register default catalog and default schema
let _ = self
.catalogs
.register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())?;
let _ = self.catalogs.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
})?;
self.catalogs.register_table(register_table_req).await?;
// Add numbers table for test
let numbers_table = Arc::new(NumbersTable::default());
@@ -154,8 +150,7 @@ impl LocalCatalogManager {
table: numbers_table,
};
let _ = self
.catalogs
self.catalogs
.register_table(register_number_table_req)
.await?;
@@ -230,9 +225,8 @@ impl LocalCatalogManager {
for entry in entries {
match entry {
Entry::Catalog(c) => {
let _ = self
.catalogs
.register_catalog_if_absent(c.catalog_name.clone());
self.catalogs
.register_catalog_sync(c.catalog_name.clone())?;
info!("Register catalog: {}", c.catalog_name);
}
Entry::Schema(s) => {
@@ -548,13 +542,6 @@ impl CatalogManager for LocalCatalogManager {
schema_name: &str,
table_name: &str,
) -> Result<Option<TableRef>> {
if schema_name == INFORMATION_SCHEMA_NAME {
let manager: CatalogManagerRef = self.catalogs.clone() as _;
let provider =
InformationSchemaProvider::new(catalog_name.to_string(), Arc::downgrade(&manager));
return provider.table(table_name);
}
self.catalogs
.table(catalog_name, schema_name, table_name)
.await
@@ -584,8 +571,8 @@ impl CatalogManager for LocalCatalogManager {
self.catalogs.table_names(catalog_name, schema_name).await
}
async fn register_catalog(&self, name: String) -> Result<bool> {
self.catalogs.register_catalog(name).await
async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool> {
self.catalogs.clone().register_catalog(name).await
}
fn as_any(&self) -> &dyn Any {

View File

@@ -16,9 +16,11 @@ use std::any::Any;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, RwLock};
use std::sync::{Arc, RwLock, Weak};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
};
use metrics::{decrement_gauge, increment_gauge};
use snafu::OptionExt;
use table::metadata::TableId;
@@ -28,6 +30,7 @@ use table::TableRef;
use crate::error::{
CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
};
use crate::information_schema::InformationSchemaProvider;
use crate::{
CatalogManager, DeregisterSchemaRequest, DeregisterTableRequest, RegisterSchemaRequest,
RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
@@ -42,24 +45,6 @@ pub struct MemoryCatalogManager {
pub table_id: AtomicU32,
}
impl Default for MemoryCatalogManager {
fn default() -> Self {
let manager = Self {
table_id: AtomicU32::new(MIN_USER_TABLE_ID),
catalogs: Default::default(),
};
let catalog = HashMap::from([(DEFAULT_SCHEMA_NAME.to_string(), HashMap::new())]);
let _ = manager
.catalogs
.write()
.unwrap()
.insert(DEFAULT_CATALOG_NAME.to_string(), catalog);
manager
}
}
#[async_trait::async_trait]
impl TableIdProvider for MemoryCatalogManager {
async fn next_table_id(&self) -> table::error::Result<TableId> {
@@ -250,7 +235,7 @@ impl CatalogManager for MemoryCatalogManager {
.collect())
}
async fn register_catalog(&self, name: String) -> Result<bool> {
async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool> {
self.register_catalog_sync(name)
}
@@ -260,6 +245,28 @@ impl CatalogManager for MemoryCatalogManager {
}
impl MemoryCatalogManager {
/// Creates a manager with the default setup
/// (e.g. the default catalog/schema and the information schema).
pub fn with_default_setup() -> Arc<Self> {
let manager = Arc::new(Self {
table_id: AtomicU32::new(MIN_USER_TABLE_ID),
catalogs: Default::default(),
});
// Safety: the default catalog and schema are registered in order, so no CatalogNotFound error will occur
manager
.register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())
.unwrap();
manager
.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
})
.unwrap();
manager
}
/// Registers a catalog and returns whether the catalog already exists
pub fn register_catalog_if_absent(&self, name: String) -> bool {
let mut catalogs = self.catalogs.write().unwrap();
@@ -273,12 +280,13 @@ impl MemoryCatalogManager {
}
}
pub fn register_catalog_sync(&self, name: String) -> Result<bool> {
pub fn register_catalog_sync(self: &Arc<Self>, name: String) -> Result<bool> {
let mut catalogs = self.catalogs.write().unwrap();
match catalogs.entry(name) {
match catalogs.entry(name.clone()) {
Entry::Vacant(e) => {
e.insert(HashMap::new());
let catalog = self.create_catalog_entry(name);
e.insert(catalog);
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
Ok(true)
}
@@ -332,9 +340,19 @@ impl MemoryCatalogManager {
Ok(true)
}
fn create_catalog_entry(self: &Arc<Self>, catalog: String) -> SchemaEntries {
let information_schema = InformationSchemaProvider::build(
catalog,
Arc::downgrade(self) as Weak<dyn CatalogManager>,
);
let mut catalog = HashMap::new();
catalog.insert(INFORMATION_SCHEMA_NAME.to_string(), information_schema);
catalog
}
#[cfg(any(test, feature = "testing"))]
pub fn new_with_table(table: TableRef) -> Self {
let manager = Self::default();
pub fn new_with_table(table: TableRef) -> Arc<Self> {
let manager = Self::with_default_setup();
let request = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
@@ -349,7 +367,7 @@ impl MemoryCatalogManager {
/// Creates a memory catalog manager that contains a numbers table, for tests
pub fn new_memory_catalog_manager() -> Result<Arc<MemoryCatalogManager>> {
Ok(Arc::new(MemoryCatalogManager::default()))
Ok(MemoryCatalogManager::with_default_setup())
}
#[cfg(test)]
@@ -392,7 +410,7 @@ mod tests {
#[tokio::test]
async fn test_mem_manager_rename_table() {
let catalog = MemoryCatalogManager::default();
let catalog = MemoryCatalogManager::with_default_setup();
let table_name = "test_table";
assert!(!catalog
.table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
@@ -456,7 +474,7 @@ mod tests {
#[tokio::test]
async fn test_catalog_rename_table() {
let catalog = MemoryCatalogManager::default();
let catalog = MemoryCatalogManager::with_default_setup();
let table_name = "num";
let table_id = 2333;
let table: TableRef = Arc::new(NumbersTable::new(table_id));
@@ -507,14 +525,14 @@ mod tests {
#[test]
pub fn test_register_if_absent() {
let list = MemoryCatalogManager::default();
let list = MemoryCatalogManager::with_default_setup();
assert!(!list.register_catalog_if_absent("test_catalog".to_string(),));
assert!(list.register_catalog_if_absent("test_catalog".to_string()));
}
#[tokio::test]
pub async fn test_catalog_deregister_table() {
let catalog = MemoryCatalogManager::default();
let catalog = MemoryCatalogManager::with_default_setup();
let table_name = "foo_table";
let register_table_req = RegisterTableRequest {
@@ -549,7 +567,7 @@ mod tests {
#[tokio::test]
async fn test_catalog_deregister_schema() {
let catalog = MemoryCatalogManager::default();
let catalog = MemoryCatalogManager::with_default_setup();
// Registers a catalog, a schema, and a table.
let catalog_name = "foo_catalog".to_string();
@@ -567,6 +585,7 @@ mod tests {
table: Arc::new(NumbersTable::default()),
};
catalog
.clone()
.register_catalog(catalog_name.clone())
.await
.unwrap();

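Taken together, the hunks above change how a catalog manager is obtained and used: `MemoryCatalogManager::with_default_setup()` replaces `MemoryCatalogManager::default()` and pre-registers the default catalog, the default schema, and an `information_schema` schema backed by a `Weak` reference to the manager, while `register_catalog` now takes `self: Arc<Self>`, so callers clone the `Arc` first. Below is a minimal usage sketch under those assumptions; the import paths and the error type are guesses based on this diff, not verified against the crate.

    use std::sync::Arc;

    use catalog::local::MemoryCatalogManager; // assumed module path
    use catalog::{CatalogManager, RegisterSchemaRequest};

    async fn set_up_catalogs() -> catalog::error::Result<()> {
        // Already contains the default catalog/schema and `information_schema`.
        // The information schema only holds a Weak back-reference, so no Arc cycle.
        let manager: Arc<MemoryCatalogManager> = MemoryCatalogManager::with_default_setup();

        // `register_catalog` consumes `self: Arc<Self>`, hence the extra clone,
        // mirroring the `.clone().register_catalog(..)` calls in the tests above.
        manager.clone().register_catalog("my_catalog".to_string()).await?;

        // Schema registration keeps the request-based API.
        manager.register_schema_sync(RegisterSchemaRequest {
            catalog: "my_catalog".to_string(),
            schema: "my_schema".to_string(),
        })?;
        Ok(())
    }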
View File

@@ -67,7 +67,7 @@ impl RemoteCatalogManager {
backend,
system_table_requests: Default::default(),
region_alive_keepers,
memory_catalog_manager: Arc::new(MemoryCatalogManager::default()),
memory_catalog_manager: MemoryCatalogManager::with_default_setup(),
table_metadata_manager,
}
}
@@ -254,7 +254,10 @@ impl CatalogManager for RemoteCatalogManager {
let Some(table) = self
.memory_catalog_manager
.table(&request.catalog, &request.schema, &request.table_name)
.await? else { return Ok(()) };
.await?
else {
return Ok(());
};
let table_info = table.table_info();
let table_ident = TableIdent {
@@ -383,6 +386,7 @@ impl CatalogManager for RemoteCatalogManager {
if remote_catalog_exists
&& self
.memory_catalog_manager
.clone()
.register_catalog(catalog.to_string())
.await?
{
@@ -420,7 +424,7 @@ impl CatalogManager for RemoteCatalogManager {
.await
}
async fn register_catalog(&self, name: String) -> Result<bool> {
async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool> {
self.memory_catalog_manager.register_catalog_sync(name)
}

View File

@@ -23,6 +23,7 @@ use table::engine::{CloseTableResult, EngineContext, TableEngine};
use table::metadata::TableId;
use table::requests::{
AlterTableRequest, CloseTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest,
TruncateTableRequest,
};
use table::test_util::MemTable;
use table::TableRef;
@@ -116,4 +117,12 @@ impl TableEngine for MockTableEngine {
async fn close(&self) -> table::Result<()> {
Ok(())
}
async fn truncate_table(
&self,
_ctx: &EngineContext,
_request: TruncateTableRequest,
) -> table::Result<bool> {
Ok(true)
}
}

View File

@@ -30,7 +30,7 @@ use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::ScanRequest;
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::{TableId, TableInfoRef};
use table::metadata::{TableId, TableInfoRef, TableType};
use table::requests::{
CreateTableRequest, DeleteRequest, InsertRequest, OpenTableRequest, TableOptions,
};
@@ -71,6 +71,10 @@ impl Table for SystemCatalogTable {
self.0.table_info()
}
fn table_type(&self) -> TableType {
self.0.table_type()
}
async fn delete(&self, request: DeleteRequest) -> TableResult<usize> {
self.0.delete(request).await
}
@@ -264,7 +268,7 @@ pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) ->
let primary_key_columns = build_primary_key_columns(entry_type, key);
let mut columns_values = HashMap::with_capacity(6);
columns_values.extend(primary_key_columns.into_iter());
columns_values.extend(primary_key_columns);
let _ = columns_values.insert(
"value".to_string(),
@@ -523,7 +527,7 @@ mod tests {
EngineConfig::default(),
EngineImpl::new(
StorageEngineConfig::default(),
Arc::new(NoopLogStore::default()),
Arc::new(NoopLogStore),
object_store.clone(),
noop_compaction_scheduler,
)
@@ -574,9 +578,15 @@ mod tests {
assert_eq!(batch.num_rows(), 1);
let row = batch.rows().next().unwrap();
let Value::UInt8(entry_type) = row[0] else { unreachable!() };
let Value::Binary(key) = row[1].clone() else { unreachable!() };
let Value::Binary(value) = row[3].clone() else { unreachable!() };
let Value::UInt8(entry_type) = row[0] else {
unreachable!()
};
let Value::Binary(key) = row[1].clone() else {
unreachable!()
};
let Value::Binary(value) = row[3].clone() else {
unreachable!()
};
let entry = decode_system_catalog(Some(entry_type), Some(&*key), Some(&*value)).unwrap();
let expected = Entry::Table(TableEntry {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),

View File

@@ -12,19 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use common_runtime::TaskFunction;
use std::sync::Arc;
struct ManifestGcTask {}
use table::data_source::DataSourceRef;
#[async_trait::async_trait]
impl TaskFunction<()> for ManifestGcTask {
/// Invoke the task.
async fn call(&mut self) -> std::result::Result<(), ()> {
todo!()
}
/// Name of the task.
fn name(&self) -> &str {
todo!()
}
}
pub type TableFactory = Arc<dyn Fn() -> DataSourceRef>;

View File

@@ -45,8 +45,8 @@ impl DfTableSourceProvider {
catalog_manager,
disallow_cross_schema_query,
resolved_tables: HashMap::new(),
default_catalog: query_ctx.current_catalog(),
default_schema: query_ctx.current_schema(),
default_catalog: query_ctx.current_catalog().to_owned(),
default_schema: query_ctx.current_schema().to_owned(),
}
}
@@ -130,7 +130,7 @@ mod tests {
let query_ctx = &QueryContext::with("greptime", "public");
let table_provider =
DfTableSourceProvider::new(Arc::new(MemoryCatalogManager::default()), true, query_ctx);
DfTableSourceProvider::new(MemoryCatalogManager::with_default_setup(), true, query_ctx);
let table_ref = TableReference::Bare {
table: Cow::Borrowed("table_name"),

View File

@@ -26,7 +26,9 @@ mod tests {
use catalog::remote::region_alive_keeper::RegionAliveKeepers;
use catalog::remote::{CachedMetaKvBackend, RemoteCatalogManager};
use catalog::{CatalogManager, RegisterSchemaRequest, RegisterTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MITO_ENGINE,
};
use common_meta::helper::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
use common_meta::ident::TableIdent;
use common_meta::key::TableMetadataManager;
@@ -179,12 +181,17 @@ mod tests {
catalog_manager.catalog_names().await.unwrap()
);
let mut schema_names = catalog_manager
.schema_names(DEFAULT_CATALOG_NAME)
.await
.unwrap();
schema_names.sort_unstable();
assert_eq!(
vec![DEFAULT_SCHEMA_NAME.to_string()],
catalog_manager
.schema_names(DEFAULT_CATALOG_NAME)
.await
.unwrap()
vec![
INFORMATION_SCHEMA_NAME.to_string(),
DEFAULT_SCHEMA_NAME.to_string()
],
schema_names
);
}
@@ -240,13 +247,18 @@ mod tests {
async fn test_register_table() {
let node_id = 42;
let components = prepare_components(node_id).await;
let mut schema_names = components
.catalog_manager
.schema_names(DEFAULT_CATALOG_NAME)
.await
.unwrap();
schema_names.sort_unstable();
assert_eq!(
vec![DEFAULT_SCHEMA_NAME.to_string()],
components
.catalog_manager
.schema_names(DEFAULT_CATALOG_NAME)
.await
.unwrap()
vec![
INFORMATION_SCHEMA_NAME.to_string(),
DEFAULT_SCHEMA_NAME.to_string(),
],
schema_names
);
// register a new table with a nonexistent catalog
@@ -309,21 +321,16 @@ mod tests {
// register catalog to catalog manager
assert!(components
.catalog_manager
.clone()
.register_catalog(catalog_name.clone())
.await
.is_ok());
assert_eq!(
HashSet::<String>::from_iter(
vec![DEFAULT_CATALOG_NAME.to_string(), catalog_name.clone()].into_iter()
),
HashSet::from_iter(
components
.catalog_manager
.catalog_names()
.await
.unwrap()
.into_iter()
)
HashSet::<String>::from_iter(vec![
DEFAULT_CATALOG_NAME.to_string(),
catalog_name.clone()
]),
HashSet::from_iter(components.catalog_manager.catalog_names().await.unwrap())
);
let table_to_register = components
@@ -380,7 +387,7 @@ mod tests {
.unwrap());
assert_eq!(
HashSet::from([schema_name.clone()]),
HashSet::from([schema_name.clone(), INFORMATION_SCHEMA_NAME.to_string()]),
components
.catalog_manager
.schema_names(&catalog_name)

View File

@@ -8,21 +8,20 @@ license.workspace = true
testing = []
[dependencies]
api = { path = "../api" }
api = { workspace = true }
arrow-flight.workspace = true
async-stream.workspace = true
common-base = { path = "../common/base" }
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
common-grpc-expr = { path = "../common/grpc-expr" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-time = { path = "../common/time" }
common-meta = { path = "../common/meta" }
common-telemetry = { path = "../common/telemetry" }
common-base = { workspace = true }
common-catalog = { workspace = true }
common-error = { workspace = true }
common-grpc = { workspace = true }
common-meta = { workspace = true }
common-query = { workspace = true }
common-recordbatch = { workspace = true }
common-telemetry = { workspace = true }
common-time = { workspace = true }
datafusion.workspace = true
datatypes = { path = "../datatypes" }
datatypes = { workspace = true }
enum_dispatch = "0.3"
futures-util.workspace = true
moka = { version = "0.9", features = ["future"] }
@@ -35,12 +34,13 @@ tokio.workspace = true
tonic.workspace = true
[dev-dependencies]
datanode = { path = "../datanode" }
common-grpc-expr = { workspace = true }
datanode = { workspace = true }
derive-new = "0.5"
substrait = { path = "../common/substrait" }
prost.workspace = true
substrait = { workspace = true }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
prost.workspace = true
[dev-dependencies.substrait_proto]
package = "substrait"

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::column::*;
use api::v1::*;
use client::{Client, Database, DEFAULT_SCHEMA_NAME};
use derive_new::new;

View File

@@ -17,48 +17,52 @@ metrics-process = ["servers/metrics-process"]
[dependencies]
anymap = "1.0.0-beta.2"
async-trait.workspace = true
catalog = { path = "../catalog" }
catalog = { workspace = true }
chrono.workspace = true
clap = { version = "3.1", features = ["derive"] }
client = { path = "../client" }
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-query = { path = "../common/query" }
common-meta = { path = "../common/meta" }
common-recordbatch = { path = "../common/recordbatch" }
common-telemetry = { path = "../common/telemetry", features = [
client = { workspace = true }
common-base = { workspace = true }
common-error = { workspace = true }
common-meta = { workspace = true }
common-query = { workspace = true }
common-recordbatch = { workspace = true }
common-telemetry = { workspace = true, features = [
"deadlock_detection",
] }
config = "0.13"
datanode = { path = "../datanode" }
datatypes = { path = "../datatypes" }
datanode = { workspace = true }
datatypes = { workspace = true }
either = "1.8"
etcd-client.workspace = true
frontend = { path = "../frontend" }
frontend = { workspace = true }
futures.workspace = true
meta-client = { path = "../meta-client" }
meta-srv = { path = "../meta-srv" }
meta-client = { workspace = true }
meta-srv = { workspace = true }
metrics.workspace = true
nu-ansi-term = "0.46"
partition = { path = "../partition" }
query = { path = "../query" }
partition = { workspace = true }
query = { workspace = true }
rand.workspace = true
rustyline = "10.1"
serde.workspace = true
servers = { path = "../servers" }
session = { path = "../session" }
servers = { workspace = true }
session = { workspace = true }
snafu.workspace = true
substrait = { path = "../common/substrait" }
table = { path = "../table" }
tikv-jemallocator = "0.5"
substrait = { workspace = true }
table = { workspace = true }
tokio.workspace = true
[target.'cfg(not(windows))'.dependencies]
tikv-jemallocator = "0.5"
[dev-dependencies]
common-test-util = { path = "../common/test-util" }
rexpect = "0.5"
temp-env = "0.3"
common-test-util = { workspace = true }
serde.workspace = true
temp-env = "0.3"
toml.workspace = true
[target.'cfg(not(windows))'.dev-dependencies]
rexpect = "0.5"
[build-dependencies]
build-data = "0.1.4"
common-version = { workspace = true }

View File

@@ -12,22 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
const DEFAULT_VALUE: &str = "unknown";
fn main() {
println!(
"cargo:rustc-env=GIT_COMMIT={}",
build_data::get_git_commit().unwrap_or_else(|_| DEFAULT_VALUE.to_string())
);
println!(
"cargo:rustc-env=GIT_COMMIT_SHORT={}",
build_data::get_git_commit_short().unwrap_or_else(|_| DEFAULT_VALUE.to_string())
);
println!(
"cargo:rustc-env=GIT_BRANCH={}",
build_data::get_git_branch().unwrap_or_else(|_| DEFAULT_VALUE.to_string())
);
println!(
"cargo:rustc-env=GIT_DIRTY={}",
build_data::get_git_dirty().map_or(DEFAULT_VALUE.to_string(), |v| v.to_string())
);
common_version::setup_git_versions();
}

View File

@@ -187,6 +187,7 @@ fn log_env_flags() {
}
}
#[cfg(not(windows))]
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

View File

@@ -167,6 +167,7 @@ fn create_table_info(table_id: TableId, table_name: TableName) -> RawTableInfo {
value_indices: vec![],
options: Default::default(),
region_numbers: (1..=100).collect(),
partition_key_indices: vec![],
};
RawTableInfo {

View File

@@ -165,10 +165,7 @@ impl Repl {
let stmt = QueryLanguageParser::parse_sql(&sql)
.with_context(|_| ParseSqlSnafu { sql: sql.clone() })?;
let query_ctx = Arc::new(QueryContext::with(
self.database.catalog(),
self.database.schema(),
));
let query_ctx = QueryContext::with(self.database.catalog(), self.database.schema());
let plan = query_engine
.planner()
@@ -180,7 +177,7 @@ impl Repl {
query_engine.optimize(&plan).context(PlanStatementSnafu)?;
let plan = DFLogicalSubstraitConvertor {}
.encode(plan)
.encode(&plan)
.context(SubstraitEncodeLogicalPlanSnafu)?;
self.database.logical_plan(plan.to_vec(), None).await

View File

@@ -12,23 +12,32 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use async_trait::async_trait;
use clap::Parser;
use common_meta::helper::TableGlobalValue;
use common_meta::error as MetaError;
use common_meta::helper::{CatalogKey as v1CatalogKey, SchemaKey as v1SchemaKey, TableGlobalValue};
use common_meta::key::catalog_name::{CatalogNameKey, CatalogNameValue};
use common_meta::key::datanode_table::{DatanodeTableKey, DatanodeTableValue};
use common_meta::key::schema_name::{SchemaNameKey, SchemaNameValue};
use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
use common_meta::key::table_name::{TableNameKey, TableNameValue};
use common_meta::key::table_region::{RegionDistribution, TableRegionKey, TableRegionValue};
use common_meta::key::TableMetaKey;
use common_meta::rpc::store::{BatchPutRequest, PutRequest, RangeRequest};
use common_meta::range_stream::PaginationStream;
use common_meta::rpc::store::{BatchDeleteRequest, BatchPutRequest, PutRequest, RangeRequest};
use common_meta::rpc::KeyValue;
use common_meta::util::get_prefix_end_key;
use common_telemetry::info;
use etcd_client::Client;
use futures::TryStreamExt;
use meta_srv::service::store::etcd::EtcdStore;
use meta_srv::service::store::kv::KvStoreRef;
use meta_srv::service::store::kv::{KvBackendAdapter, KvStoreRef};
use snafu::ResultExt;
use crate::cli::{Instance, Tool};
use crate::error::{ConnectEtcdSnafu, Result};
use crate::error::{self, ConnectEtcdSnafu, Result};
#[derive(Debug, Default, Parser)]
pub struct UpgradeCommand {
@@ -60,25 +69,170 @@ struct MigrateTableMetadata {
#[async_trait]
impl Tool for MigrateTableMetadata {
// Migrates the database's metadata from 0.3 to 0.4.
async fn do_work(&self) -> Result<()> {
let req = RangeRequest::new().with_prefix(b"__tg".to_vec());
let resp = self.etcd_store.range(req).await.unwrap();
for kv in resp.kvs {
let key = String::from_utf8_lossy(kv.key());
let value = TableGlobalValue::from_bytes(kv.value())
.unwrap_or_else(|e| panic!("table global value is corrupted: {e}, key: {key}"));
self.migrate_table_global_values().await?;
self.migrate_catalog_keys().await?;
self.migrate_schema_keys().await?;
Ok(())
}
}
const PAGE_SIZE: usize = 1000;
impl MigrateTableMetadata {
async fn migrate_schema_keys(&self) -> Result<()> {
// The schema key prefix.
let key = b"__s".to_vec();
let range_end = get_prefix_end_key(&key);
let mut keys = Vec::new();
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
let mut stream = PaginationStream::new(
KvBackendAdapter::wrap(self.etcd_store.clone()),
RangeRequest::new().with_range(key, range_end),
PAGE_SIZE,
Arc::new(|kv: KeyValue| {
let key_str =
std::str::from_utf8(&kv.key).context(MetaError::ConvertRawKeySnafu)?;
let key = v1SchemaKey::parse(key_str)
.unwrap_or_else(|e| panic!("schema key is corrupted: {e}, key: {key_str}"));
Ok((key, ()))
}),
);
while let Some((key, _)) = stream.try_next().await.context(error::IterStreamSnafu)? {
let _ = self.migrate_schema_key(&key).await;
keys.push(key.to_string().as_bytes().to_vec());
}
info!("Total migrated SchemaKeys: {}", keys.len());
self.delete_migrated_keys(keys).await;
Ok(())
}
async fn migrate_schema_key(&self, key: &v1SchemaKey) -> Result<()> {
let new_key = SchemaNameKey::new(&key.catalog_name, &key.schema_name);
let schema_name_value = SchemaNameValue;
info!("Creating '{new_key}'");
if self.dryrun {
info!("Dryrun: do nothing");
} else {
self.etcd_store
.put(
PutRequest::new()
.with_key(new_key.as_raw_key())
.with_value(schema_name_value.try_as_raw_value().unwrap()),
)
.await
.unwrap();
}
Ok(())
}
async fn migrate_catalog_keys(&self) -> Result<()> {
// The catalog key prefix.
let key = b"__c".to_vec();
let range_end = get_prefix_end_key(&key);
let mut keys = Vec::new();
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
let mut stream = PaginationStream::new(
KvBackendAdapter::wrap(self.etcd_store.clone()),
RangeRequest::new().with_range(key, range_end),
PAGE_SIZE,
Arc::new(|kv: KeyValue| {
let key_str =
std::str::from_utf8(&kv.key).context(MetaError::ConvertRawKeySnafu)?;
let key = v1CatalogKey::parse(key_str)
.unwrap_or_else(|e| panic!("catalog key is corrupted: {e}, key: {key_str}"));
Ok((key, ()))
}),
);
while let Some((key, _)) = stream.try_next().await.context(error::IterStreamSnafu)? {
let _ = self.migrate_catalog_key(&key).await;
keys.push(key.to_string().as_bytes().to_vec());
}
info!("Total migrated CatalogKeys: {}", keys.len());
self.delete_migrated_keys(keys).await;
Ok(())
}
async fn migrate_catalog_key(&self, key: &v1CatalogKey) {
let new_key = CatalogNameKey::new(&key.catalog_name);
let catalog_name_value = CatalogNameValue;
info!("Creating '{new_key}'");
if self.dryrun {
info!("Dryrun: do nothing");
} else {
self.etcd_store
.put(
PutRequest::new()
.with_key(new_key.as_raw_key())
.with_value(catalog_name_value.try_as_raw_value().unwrap()),
)
.await
.unwrap();
}
}
async fn migrate_table_global_values(&self) -> Result<()> {
let key = b"__tg".to_vec();
let range_end = get_prefix_end_key(&key);
let mut keys = Vec::new();
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
let mut stream = PaginationStream::new(
KvBackendAdapter::wrap(self.etcd_store.clone()),
RangeRequest::new().with_range(key, range_end.clone()),
PAGE_SIZE,
Arc::new(|kv: KeyValue| {
let key = String::from_utf8_lossy(kv.key()).to_string();
let value = TableGlobalValue::from_bytes(kv.value())
.unwrap_or_else(|e| panic!("table global value is corrupted: {e}, key: {key}"));
Ok((key, value))
}),
);
while let Some((key, value)) = stream.try_next().await.context(error::IterStreamSnafu)? {
self.create_table_name_key(&value).await;
self.create_datanode_table_keys(&value).await;
self.split_table_global_value(&key, value).await;
keys.push(key.as_bytes().to_vec());
}
info!("Total migrated TableGlobalKeys: {}", keys.len());
self.delete_migrated_keys(keys).await;
Ok(())
}
}
impl MigrateTableMetadata {
async fn delete_migrated_keys(&self, keys: Vec<Vec<u8>>) {
for keys in keys.chunks(PAGE_SIZE) {
info!("Deleting {} TableGlobalKeys", keys.len());
let req = BatchDeleteRequest {
keys: keys.to_vec(),
prev_kv: false,
};
if self.dryrun {
info!("Dryrun: do nothing");
} else {
self.etcd_store.batch_delete(req).await.unwrap();
}
}
}
async fn split_table_global_value(&self, key: &str, value: TableGlobalValue) {
let table_id = value.table_id();
let region_distribution: RegionDistribution = value.regions_id_map.into_iter().collect();

View File

@@ -16,7 +16,7 @@ use std::time::Duration;
use clap::Parser;
use common_telemetry::logging;
use datanode::datanode::{Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig};
use datanode::datanode::{Datanode, DatanodeOptions};
use meta_client::MetaClientOptions;
use servers::Mode;
use snafu::ResultExt;
@@ -143,9 +143,7 @@ impl StartCommand {
}
if let Some(data_home) = &self.data_home {
opts.storage.store = ObjectStoreConfig::File(FileConfig {
data_home: data_home.clone(),
});
opts.storage.data_home = data_home.clone();
}
if let Some(wal_dir) = &self.wal_dir {
@@ -185,7 +183,9 @@ mod tests {
use common_base::readable_size::ReadableSize;
use common_test_util::temp_dir::create_named_temp_file;
use datanode::datanode::{CompactionConfig, ObjectStoreConfig, RegionManifestConfig};
use datanode::datanode::{
CompactionConfig, FileConfig, ObjectStoreConfig, RegionManifestConfig,
};
use servers::Mode;
use super::*;
@@ -243,8 +243,10 @@ mod tests {
..Default::default()
};
let Options::Datanode(options) =
cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
let Options::Datanode(options) = cmd.load_options(TopLevelOptions::default()).unwrap()
else {
unreachable!()
};
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!(Some(42), options.node_id);
@@ -268,16 +270,11 @@ mod tests {
assert_eq!(10000, ddl_timeout_millis);
assert_eq!(3000, timeout_millis);
assert!(tcp_nodelay);
match &options.storage.store {
ObjectStoreConfig::File(FileConfig { data_home, .. }) => {
assert_eq!("/tmp/greptimedb/", data_home)
}
ObjectStoreConfig::S3 { .. } => unreachable!(),
ObjectStoreConfig::Oss { .. } => unreachable!(),
ObjectStoreConfig::Azblob { .. } => unreachable!(),
ObjectStoreConfig::Gcs { .. } => unreachable!(),
};
assert_eq!("/tmp/greptimedb/", options.storage.data_home);
assert!(matches!(
&options.storage.store,
ObjectStoreConfig::File(FileConfig { .. })
));
assert_eq!(
CompactionConfig {
@@ -397,10 +394,10 @@ mod tests {
let env_prefix = "DATANODE_UT";
temp_env::with_vars(
vec![
[
(
// storage.manifest.gc_duration = 9s
vec![
[
env_prefix.to_string(),
"storage".to_uppercase(),
"manifest".to_uppercase(),
@@ -411,7 +408,7 @@ mod tests {
),
(
// storage.compaction.max_purge_tasks = 99
vec![
[
env_prefix.to_string(),
"storage".to_uppercase(),
"compaction".to_uppercase(),
@@ -422,7 +419,7 @@ mod tests {
),
(
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
vec![
[
env_prefix.to_string(),
"meta_client_options".to_uppercase(),
"metasrv_addrs".to_uppercase(),
@@ -440,7 +437,10 @@ mod tests {
};
let Options::Datanode(opts) =
command.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
command.load_options(TopLevelOptions::default()).unwrap()
else {
unreachable!()
};
// Should be read from env, env > default values.
assert_eq!(

View File

@@ -23,6 +23,12 @@ use snafu::{Location, Snafu};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Failed to iter stream, source: {}", source))]
IterStream {
location: Location,
source: common_meta::error::Error,
},
#[snafu(display("Failed to start datanode, source: {}", source))]
StartDatanode {
location: Location,
@@ -176,6 +182,7 @@ impl ErrorExt for Error {
Error::ShutdownMetaServer { source, .. } => source.status_code(),
Error::BuildMetaServer { source, .. } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::IterStream { source, .. } => source.status_code(),
Error::MissingConfig { .. }
| Error::LoadLayeredConfig { .. }
| Error::IllegalConfig { .. }

View File

@@ -257,8 +257,10 @@ mod tests {
..Default::default()
};
let Options::Frontend(opts) =
command.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
let Options::Frontend(opts) = command.load_options(TopLevelOptions::default()).unwrap()
else {
unreachable!()
};
assert_eq!(opts.http_options.as_ref().unwrap().addr, "127.0.0.1:1234");
assert_eq!(
@@ -323,8 +325,10 @@ mod tests {
..Default::default()
};
let Options::Frontend(fe_opts) =
command.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
let Options::Frontend(fe_opts) = command.load_options(TopLevelOptions::default()).unwrap()
else {
unreachable!()
};
assert_eq!(Mode::Distributed, fe_opts.mode);
assert_eq!(
"127.0.0.1:4000".to_string(),
@@ -404,10 +408,10 @@ mod tests {
let env_prefix = "FRONTEND_UT";
temp_env::with_vars(
vec![
[
(
// mysql_options.addr = 127.0.0.1:14002
vec![
[
env_prefix.to_string(),
"mysql_options".to_uppercase(),
"addr".to_uppercase(),
@@ -417,7 +421,7 @@ mod tests {
),
(
// mysql_options.runtime_size = 11
vec![
[
env_prefix.to_string(),
"mysql_options".to_uppercase(),
"runtime_size".to_uppercase(),
@@ -427,7 +431,7 @@ mod tests {
),
(
// http_options.addr = 127.0.0.1:24000
vec![
[
env_prefix.to_string(),
"http_options".to_uppercase(),
"addr".to_uppercase(),
@@ -437,7 +441,7 @@ mod tests {
),
(
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
vec![
[
env_prefix.to_string(),
"meta_client_options".to_uppercase(),
"metasrv_addrs".to_uppercase(),
@@ -458,8 +462,10 @@ mod tests {
log_dir: None,
log_level: Some("error".to_string()),
};
let Options::Frontend(fe_opts) =
command.load_options(top_level_opts).unwrap() else {unreachable!()};
let Options::Frontend(fe_opts) = command.load_options(top_level_opts).unwrap()
else {
unreachable!()
};
// Should be read from env, env > default values.
assert_eq!(fe_opts.mysql_options.as_ref().unwrap().runtime_size, 11);

View File

@@ -187,8 +187,10 @@ mod tests {
..Default::default()
};
let Options::Metasrv(options) =
cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
let Options::Metasrv(options) = cmd.load_options(TopLevelOptions::default()).unwrap()
else {
unreachable!()
};
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!("127.0.0.1:2380".to_string(), options.store_addr);
assert_eq!(SelectorType::LoadBased, options.selector);
@@ -216,8 +218,10 @@ mod tests {
..Default::default()
};
let Options::Metasrv(options) =
cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
let Options::Metasrv(options) = cmd.load_options(TopLevelOptions::default()).unwrap()
else {
unreachable!()
};
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
assert_eq!("127.0.0.1:2379".to_string(), options.store_addr);
@@ -269,20 +273,20 @@ mod tests {
let env_prefix = "METASRV_UT";
temp_env::with_vars(
vec![
[
(
// bind_addr = 127.0.0.1:14002
vec![env_prefix.to_string(), "bind_addr".to_uppercase()].join(ENV_VAR_SEP),
[env_prefix.to_string(), "bind_addr".to_uppercase()].join(ENV_VAR_SEP),
Some("127.0.0.1:14002"),
),
(
// server_addr = 127.0.0.1:13002
vec![env_prefix.to_string(), "server_addr".to_uppercase()].join(ENV_VAR_SEP),
[env_prefix.to_string(), "server_addr".to_uppercase()].join(ENV_VAR_SEP),
Some("127.0.0.1:13002"),
),
(
// http_options.addr = 127.0.0.1:24000
vec![
[
env_prefix.to_string(),
"http_options".to_uppercase(),
"addr".to_uppercase(),
@@ -300,7 +304,10 @@ mod tests {
};
let Options::Metasrv(opts) =
command.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
command.load_options(TopLevelOptions::default()).unwrap()
else {
unreachable!()
};
// Should be read from env, env > default values.
assert_eq!(opts.bind_addr, "127.0.0.1:14002");

View File

@@ -158,10 +158,10 @@ mod tests {
let env_prefix = "DATANODE_UT";
temp_env::with_vars(
// The following environment variables will be used to override the values in the config file.
vec![
[
(
// storage.manifest.checkpoint_margin = 99
vec![
[
env_prefix.to_string(),
"storage".to_uppercase(),
"manifest".to_uppercase(),
@@ -172,7 +172,7 @@ mod tests {
),
(
// storage.type = S3
vec![
[
env_prefix.to_string(),
"storage".to_uppercase(),
"type".to_uppercase(),
@@ -182,7 +182,7 @@ mod tests {
),
(
// storage.bucket = mybucket
vec![
[
env_prefix.to_string(),
"storage".to_uppercase(),
"bucket".to_uppercase(),
@@ -192,7 +192,7 @@ mod tests {
),
(
// storage.manifest.gc_duration = 42s
vec![
[
env_prefix.to_string(),
"storage".to_uppercase(),
"manifest".to_uppercase(),
@@ -203,7 +203,7 @@ mod tests {
),
(
// storage.manifest.checkpoint_on_startup = true
vec![
[
env_prefix.to_string(),
"storage".to_uppercase(),
"manifest".to_uppercase(),
@@ -214,7 +214,7 @@ mod tests {
),
(
// wal.dir = /other/wal/dir
vec![
[
env_prefix.to_string(),
"wal".to_uppercase(),
"dir".to_uppercase(),
@@ -224,7 +224,7 @@ mod tests {
),
(
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
vec![
[
env_prefix.to_string(),
"meta_client_options".to_uppercase(),
"metasrv_addrs".to_uppercase(),

View File

@@ -83,6 +83,7 @@ impl SubCommand {
pub struct StandaloneOptions {
pub mode: Mode,
pub enable_memory_catalog: bool,
pub enable_telemetry: bool,
pub http_options: Option<HttpOptions>,
pub grpc_options: Option<GrpcOptions>,
pub mysql_options: Option<MysqlOptions>,
@@ -102,6 +103,7 @@ impl Default for StandaloneOptions {
Self {
mode: Mode::Standalone,
enable_memory_catalog: false,
enable_telemetry: true,
http_options: Some(HttpOptions::default()),
grpc_options: Some(GrpcOptions::default()),
mysql_options: Some(MysqlOptions::default()),
@@ -139,6 +141,7 @@ impl StandaloneOptions {
fn datanode_options(self) -> DatanodeOptions {
DatanodeOptions {
enable_memory_catalog: self.enable_memory_catalog,
enable_telemetry: self.enable_telemetry,
wal: self.wal,
storage: self.storage,
procedure: self.procedure,
@@ -423,7 +426,10 @@ mod tests {
..Default::default()
};
let Options::Standalone(options) = cmd.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
let Options::Standalone(options) = cmd.load_options(TopLevelOptions::default()).unwrap()
else {
unreachable!()
};
let fe_opts = options.fe_opts;
let dn_opts = options.dn_opts;
let logging_opts = options.logging;
@@ -484,7 +490,8 @@ mod tests {
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
log_level: Some("debug".to_string()),
})
.unwrap() else {
.unwrap()
else {
unreachable!()
};
@@ -508,10 +515,10 @@ mod tests {
let env_prefix = "STANDALONE_UT";
temp_env::with_vars(
vec![
[
(
// logging.dir = /other/log/dir
vec![
[
env_prefix.to_string(),
"logging".to_uppercase(),
"dir".to_uppercase(),
@@ -521,7 +528,7 @@ mod tests {
),
(
// logging.level = info
vec![
[
env_prefix.to_string(),
"logging".to_uppercase(),
"level".to_uppercase(),
@@ -531,7 +538,7 @@ mod tests {
),
(
// http_options.addr = 127.0.0.1:24000
vec![
[
env_prefix.to_string(),
"http_options".to_uppercase(),
"addr".to_uppercase(),
@@ -552,8 +559,10 @@ mod tests {
log_dir: None,
log_level: None,
};
let Options::Standalone(opts) =
command.load_options(top_level_opts).unwrap() else {unreachable!()};
let Options::Standalone(opts) = command.load_options(top_level_opts).unwrap()
else {
unreachable!()
};
// Should be read from env, env > default values.
assert_eq!(opts.logging.dir, "/other/log/dir");

View File

@@ -8,7 +8,7 @@ license.workspace = true
anymap = "1.0.0-beta.2"
bitvec = "1.0"
bytes = { version = "1.1", features = ["serde"] }
common-error = { path = "../error" }
common-error = { workspace = true }
paste = "1.0"
serde = { version = "1.0", features = ["derive"] }
snafu.workspace = true

View File

@@ -17,7 +17,7 @@ use std::ops::Deref;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
/// Bytes buffer.
#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize)]
#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Deserialize, Serialize)]
pub struct Bytes(bytes::Bytes);
impl From<Bytes> for bytes::Bytes {
@@ -80,7 +80,7 @@ impl PartialEq<Bytes> for [u8] {
///
/// Now this buffer is restricted to holding valid UTF-8 strings (we only allow constructing `StringBytes`
/// from String or str). We may support other encodings in the future.
#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord)]
#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct StringBytes(bytes::Bytes);
impl StringBytes {

View File

@@ -5,10 +5,7 @@ edition.workspace = true
license.workspace = true
[dependencies]
async-trait = "0.1"
common-error = { path = "../error" }
common-telemetry = { path = "../telemetry" }
datatypes = { path = "../../datatypes" }
common-error = { workspace = true }
serde.workspace = true
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }

View File

@@ -29,6 +29,10 @@ pub const SYSTEM_CATALOG_TABLE_ID: u32 = 0;
pub const SCRIPTS_TABLE_ID: u32 = 1;
/// numbers table id
pub const NUMBERS_TABLE_ID: u32 = 2;
/// id for information_schema.tables
pub const INFORMATION_SCHEMA_TABLES_TABLE_ID: u32 = 3;
/// id for information_schema.columns
pub const INFORMATION_SCHEMA_COLUMNS_TABLE_ID: u32 = 4;
pub const MITO_ENGINE: &str = "mito";
pub const IMMUTABLE_FILE_ENGINE: &str = "file";

View File

@@ -32,6 +32,33 @@ pub fn build_db_string(catalog: &str, schema: &str) -> String {
}
}
/// Attempts to parse the catalog and schema from a given database name.
///
/// The database name may come from different sources:
///
/// - MySQL: the optional `schema` name in the MySQL protocol login request; the user
///   can also switch databases with the `USE` command
/// - Postgres: the `database` parameter in the Postgres wire protocol, required
/// - HTTP RESTful API: the database parameter, optional
/// - gRPC: the dbname field in the header, optional but with a higher priority than
///   the original catalog/schema
///
/// When a database name is provided, we attempt to parse the catalog and schema from
/// it, assuming the format `[<catalog>-]<schema>`:
///
/// - If the `[<catalog>-]` part is not provided, the whole database name is used as
///   the schema name
/// - If `[<catalog>-]` is provided, the database name is split on the first `-` into
///   `<catalog>` and `<schema>`.
pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (&str, &str) {
let parts = db.splitn(2, '-').collect::<Vec<&str>>();
if parts.len() == 2 {
(parts[0], parts[1])
} else {
(DEFAULT_CATALOG_NAME, db)
}
}
#[cfg(test)]
mod tests {
use super::*;
@@ -41,4 +68,22 @@ mod tests {
assert_eq!("test", build_db_string(DEFAULT_CATALOG_NAME, "test"));
assert_eq!("a0b1c2d3-test", build_db_string("a0b1c2d3", "test"));
}
#[test]
fn test_parse_catalog_and_schema() {
assert_eq!(
(DEFAULT_CATALOG_NAME, "fullschema"),
parse_catalog_and_schema_from_db_string("fullschema")
);
assert_eq!(
("catalog", "schema"),
parse_catalog_and_schema_from_db_string("catalog-schema")
);
assert_eq!(
("catalog", "schema1-schema2"),
parse_catalog_and_schema_from_db_string("catalog-schema1-schema2")
);
}
}
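As a quick illustration of the contract documented above, here is a small round-trip sketch pairing `build_db_string` with the new parser. It assumes both helpers are exported from the `common_catalog` crate root, which this diff suggests but does not show.

    use common_catalog::consts::DEFAULT_CATALOG_NAME;
    use common_catalog::{build_db_string, parse_catalog_and_schema_from_db_string};

    fn main() {
        // A bare schema name falls back to the default catalog.
        assert_eq!(
            (DEFAULT_CATALOG_NAME, "public"),
            parse_catalog_and_schema_from_db_string("public")
        );

        // A `<catalog>-<schema>` db string splits on the first `-` and round-trips.
        let db = build_db_string("my_catalog", "my_schema");
        assert_eq!(
            ("my_catalog", "my_schema"),
            parse_catalog_and_schema_from_db_string(&db)
        );
    }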

View File

@@ -5,8 +5,8 @@ edition.workspace = true
license.workspace = true
[dependencies]
arrow.workspace = true
arrow-schema.workspace = true
arrow.workspace = true
async-compression = { version = "0.3", features = [
"bzip2",
"gzip",
@@ -17,20 +17,20 @@ async-compression = { version = "0.3", features = [
] }
async-trait.workspace = true
bytes = "1.1"
common-base = { path = "../base" }
common-error = { path = "../error" }
common-runtime = { path = "../runtime" }
common-error = { workspace = true }
common-runtime = { workspace = true }
datafusion.workspace = true
derive_builder = "0.12"
derive_builder.workspace = true
futures.workspace = true
object-store = { path = "../../object-store" }
object-store = { workspace = true }
orc-rust = "0.2"
paste = "1.0"
regex = "1.7"
snafu.workspace = true
tokio.workspace = true
strum = { version = "0.21", features = ["derive"] }
tokio-util.workspace = true
tokio.workspace = true
url = "2.3"
paste = "1.0"
[dev-dependencies]
common-test-util = { path = "../test-util" }
common-test-util = { workspace = true }

View File

@@ -20,11 +20,12 @@ use async_compression::tokio::bufread::{BzDecoder, GzipDecoder, XzDecoder, ZstdD
use async_compression::tokio::write;
use bytes::Bytes;
use futures::Stream;
use strum::EnumIter;
use tokio::io::{AsyncRead, AsyncWriteExt, BufReader};
use tokio_util::io::{ReaderStream, StreamReader};
use crate::error::{self, Error, Result};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EnumIter)]
pub enum CompressionType {
/// Gzip-ed file
Gzip,

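The only functional change in this file is the added `strum::EnumIter` derive. As a hedged aside, the sketch below shows what that derive provides, namely iterating over every `CompressionType` variant; the import path for `CompressionType` is an assumption, and the call sites that motivated the derive are not shown in this diff.

    use common_datasource::compression::CompressionType; // assumed path
    use strum::IntoEnumIterator; // brings `CompressionType::iter()` into scope

    fn main() {
        // Enumerate every supported compression type, e.g. to exercise all of them in a test.
        for compression in CompressionType::iter() {
            println!("{compression:?}");
        }
    }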
View File

@@ -30,8 +30,8 @@ use arrow::record_batch::RecordBatch;
use arrow_schema::{ArrowError, Schema as ArrowSchema};
use async_trait::async_trait;
use bytes::{Buf, Bytes};
use datafusion::datasource::physical_plan::FileOpenFuture;
use datafusion::error::{DataFusionError, Result as DataFusionResult};
use datafusion::physical_plan::file_format::FileOpenFuture;
use datafusion::physical_plan::SendableRecordBatchStream;
use futures::StreamExt;
use object_store::ObjectStore;

View File

@@ -23,8 +23,8 @@ use arrow::record_batch::RecordBatch;
use arrow_schema::{Schema, SchemaRef};
use async_trait::async_trait;
use common_runtime;
use datafusion::datasource::physical_plan::{FileMeta, FileOpenFuture, FileOpener};
use datafusion::error::Result as DataFusionResult;
use datafusion::physical_plan::file_format::{FileMeta, FileOpenFuture, FileOpener};
use datafusion::physical_plan::SendableRecordBatchStream;
use derive_builder::Builder;
use object_store::ObjectStore;
@@ -209,15 +209,19 @@ impl DfRecordBatchEncoder for csv::Writer<SharedBuffer> {
#[cfg(test)]
mod tests {
use common_test_util::find_workspace_path;
use super::*;
use crate::file_format::{
FileFormat, FORMAT_COMPRESSION_TYPE, FORMAT_DELIMITER, FORMAT_HAS_HEADER,
FORMAT_SCHEMA_INFER_MAX_RECORD,
};
use crate::test_util::{self, format_schema, test_store};
use crate::test_util::{format_schema, test_store};
fn test_data_root() -> String {
test_util::get_data_dir("tests/csv").display().to_string()
find_workspace_path("/src/common/datasource/tests/csv")
.display()
.to_string()
}
#[tokio::test]

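The test modules in the hunks above and below swap the crate-relative `test_util::get_data_dir` helper for `common_test_util::find_workspace_path`, which resolves a path from the workspace root so the same literal works from any member crate. A minimal sketch of the call shape, assuming only what the hunks themselves show:

    use common_test_util::find_workspace_path;

    fn main() {
        // Resolves relative to the workspace root rather than CARGO_MANIFEST_DIR.
        let path = find_workspace_path("/src/common/datasource/tests/csv/basic.csv")
            .display()
            .to_string();
        println!("{path}");
    }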
View File

@@ -26,8 +26,8 @@ use arrow::record_batch::RecordBatch;
use arrow_schema::Schema;
use async_trait::async_trait;
use common_runtime;
use datafusion::datasource::physical_plan::{FileMeta, FileOpenFuture, FileOpener};
use datafusion::error::{DataFusionError, Result as DataFusionResult};
use datafusion::physical_plan::file_format::{FileMeta, FileOpenFuture, FileOpener};
use datafusion::physical_plan::SendableRecordBatchStream;
use object_store::ObjectStore;
use snafu::ResultExt;
@@ -167,12 +167,16 @@ impl DfRecordBatchEncoder for json::Writer<SharedBuffer, LineDelimited> {
#[cfg(test)]
mod tests {
use common_test_util::find_workspace_path;
use super::*;
use crate::file_format::{FileFormat, FORMAT_COMPRESSION_TYPE, FORMAT_SCHEMA_INFER_MAX_RECORD};
use crate::test_util::{self, format_schema, test_store};
use crate::test_util::{format_schema, test_store};
fn test_data_root() -> String {
test_util::get_data_dir("tests/json").display().to_string()
find_workspace_path("/src/common/datasource/tests/json")
.display()
.to_string()
}
#[tokio::test]

View File

@@ -20,8 +20,8 @@ use arrow::compute::cast;
use arrow_schema::{ArrowError, Schema, SchemaRef};
use async_trait::async_trait;
use datafusion::arrow::record_batch::RecordBatch as DfRecordBatch;
use datafusion::datasource::physical_plan::{FileMeta, FileOpenFuture, FileOpener};
use datafusion::error::{DataFusionError, Result as DfResult};
use datafusion::physical_plan::file_format::{FileMeta, FileOpenFuture, FileOpener};
use datafusion::physical_plan::RecordBatchStream;
use futures::{Stream, StreamExt, TryStreamExt};
use object_store::ObjectStore;
@@ -188,19 +188,22 @@ impl FileOpener for OrcOpener {
#[cfg(test)]
mod tests {
use common_test_util::find_workspace_path;
use super::*;
use crate::file_format::FileFormat;
use crate::test_util::{self, format_schema, test_store};
use crate::test_util::{format_schema, test_store};
fn test_data_root() -> String {
test_util::get_data_dir("tests/orc").display().to_string()
find_workspace_path("/src/common/datasource/tests/orc")
.display()
.to_string()
}
#[tokio::test]
async fn test_orc_infer_schema() {
let orc = OrcFormat::default();
let store = test_store(&test_data_root());
let schema = orc.infer_schema(&store, "test.orc").await.unwrap();
let schema = OrcFormat.infer_schema(&store, "test.orc").await.unwrap();
let formatted: Vec<_> = format_schema(schema);
assert_eq!(

View File

@@ -18,13 +18,13 @@ use std::sync::Arc;
use arrow::record_batch::RecordBatch;
use arrow_schema::Schema;
use async_trait::async_trait;
use datafusion::datasource::physical_plan::{FileMeta, ParquetFileReaderFactory};
use datafusion::error::Result as DatafusionResult;
use datafusion::parquet::arrow::async_reader::AsyncFileReader;
use datafusion::parquet::arrow::{parquet_to_arrow_schema, ArrowWriter};
use datafusion::parquet::errors::{ParquetError, Result as ParquetResult};
use datafusion::parquet::file::metadata::ParquetMetaData;
use datafusion::parquet::format::FileMetaData;
use datafusion::physical_plan::file_format::{FileMeta, ParquetFileReaderFactory};
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use futures::future::BoxFuture;
use object_store::{ObjectStore, Reader};
@@ -158,11 +158,13 @@ impl ArrowWriterCloser for ArrowWriter<SharedBuffer> {
#[cfg(test)]
mod tests {
use common_test_util::find_workspace_path;
use super::*;
use crate::test_util::{self, format_schema, test_store};
use crate::test_util::{format_schema, test_store};
fn test_data_root() -> String {
test_util::get_data_dir("tests/parquet")
find_workspace_path("/src/common/datasource/tests/parquet")
.display()
.to_string()
}

View File

@@ -17,9 +17,10 @@ use std::collections::HashMap;
use std::sync::Arc;
use std::vec;
use common_test_util::find_workspace_path;
use datafusion::assert_batches_eq;
use datafusion::datasource::physical_plan::{FileOpener, FileScanConfig, FileStream, ParquetExec};
use datafusion::execution::context::TaskContext;
use datafusion::physical_plan::file_format::{FileOpener, FileScanConfig, FileStream, ParquetExec};
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use datafusion::physical_plan::ExecutionPlan;
use datafusion::prelude::SessionContext;
@@ -71,7 +72,7 @@ async fn test_json_opener() {
CompressionType::Uncompressed,
);
let path = &test_util::get_data_dir("tests/json/basic.json")
let path = &find_workspace_path("/src/common/datasource/tests/json/basic.json")
.display()
.to_string();
let tests = [
@@ -111,7 +112,7 @@ async fn test_csv_opener() {
let store = test_store("/");
let schema = test_basic_schema();
let path = &test_util::get_data_dir("tests/csv/basic.csv")
let path = &find_workspace_path("/src/common/datasource/tests/csv/basic.csv")
.display()
.to_string();
let csv_conf = CsvConfigBuilder::default()
@@ -160,7 +161,7 @@ async fn test_parquet_exec() {
let schema = test_basic_schema();
let path = &test_util::get_data_dir("tests/parquet/basic.parquet")
let path = &find_workspace_path("/src/common/datasource/tests/parquet/basic.parquet")
.display()
.to_string();
let base_config = scan_config(schema.clone(), None, path);
@@ -181,7 +182,7 @@ async fn test_parquet_exec() {
.await;
assert_batches_eq!(
vec![
[
"+-----+-------+",
"| num | str |",
"+-----+-------+",
@@ -196,14 +197,15 @@ async fn test_parquet_exec() {
#[tokio::test]
async fn test_orc_opener() {
let root = test_util::get_data_dir("tests/orc").display().to_string();
let root = find_workspace_path("/src/common/datasource/tests/orc")
.display()
.to_string();
let store = test_store(&root);
let orc = OrcFormat::default();
let schema = orc.infer_schema(&store, "test.orc").await.unwrap();
let schema = OrcFormat.infer_schema(&store, "test.orc").await.unwrap();
let schema = Arc::new(schema);
let orc_opener = OrcOpener::new(store.clone(), schema.clone(), None);
let path = &test_util::get_data_dir("/test.orc").display().to_string();
let path = "test.orc";
let tests = [
Test {

View File

@@ -12,14 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::PathBuf;
use std::sync::Arc;
use arrow_schema::{DataType, Field, Schema, SchemaRef};
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use datafusion::datasource::listing::PartitionedFile;
use datafusion::datasource::object_store::ObjectStoreUrl;
use datafusion::physical_plan::file_format::{FileScanConfig, FileStream};
use datafusion::datasource::physical_plan::{FileScanConfig, FileStream};
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use object_store::services::Fs;
use object_store::ObjectStore;
@@ -31,13 +30,6 @@ use crate::test_util;
pub const TEST_BATCH_SIZE: usize = 100;
pub fn get_data_dir(path: &str) -> PathBuf {
// https://doc.rust-lang.org/cargo/reference/environment-variables.html
let dir = env!("CARGO_MANIFEST_DIR");
PathBuf::from(dir).join(path)
}
pub fn format_schema(schema: Schema) -> Vec<String> {
schema
.fields()
@@ -78,6 +70,9 @@ pub fn test_basic_schema() -> SchemaRef {
}
pub fn scan_config(file_schema: SchemaRef, limit: Option<usize>, filename: &str) -> FileScanConfig {
// object_store only recognizes Unix-style paths, so make it happy.
let filename = &filename.replace('\\', "/");
FileScanConfig {
object_store_url: ObjectStoreUrl::parse("empty://").unwrap(), // won't be used
file_schema,
@@ -86,7 +81,7 @@ pub fn scan_config(file_schema: SchemaRef, limit: Option<usize>, filename: &str)
projection: None,
limit,
table_partition_cols: vec![],
output_ordering: None,
output_ordering: vec![],
infinite_source: false,
}
}
@@ -124,12 +119,7 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi
let written = tmp_store.read(&output_path).await.unwrap();
let origin = store.read(origin_path).await.unwrap();
// ignores `\n`
assert_eq!(
String::from_utf8_lossy(&written).trim_end_matches('\n'),
String::from_utf8_lossy(&origin).trim_end_matches('\n'),
)
assert_eq_lines(written, origin);
}
pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usize) -> usize) {
@@ -166,10 +156,19 @@ pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usiz
let written = tmp_store.read(&output_path).await.unwrap();
let origin = store.read(origin_path).await.unwrap();
assert_eq_lines(written, origin);
}
// ignores `\n`
// Ignore the CRLF difference across operating systems.
fn assert_eq_lines(written: Vec<u8>, origin: Vec<u8>) {
assert_eq!(
String::from_utf8_lossy(&written).trim_end_matches('\n'),
String::from_utf8_lossy(&origin).trim_end_matches('\n'),
String::from_utf8(written)
.unwrap()
.lines()
.collect::<Vec<_>>(),
String::from_utf8(origin)
.unwrap()
.lines()
.collect::<Vec<_>>(),
)
}

View File

@@ -12,50 +12,36 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use common_test_util::find_workspace_path;
use crate::test_util;
#[tokio::test]
async fn test_stream_to_json() {
let origin_path = &find_workspace_path("/src/common/datasource/tests/json/basic.json")
.display()
.to_string();
// A small threshold
// Triggers a flush on each write
test_util::setup_stream_to_json_test(
&test_util::get_data_dir("tests/json/basic.json")
.display()
.to_string(),
|size| size / 2,
)
.await;
test_util::setup_stream_to_json_test(origin_path, |size| size / 2).await;
// A large threshold
// Only triggers a flush at the end
test_util::setup_stream_to_json_test(
&test_util::get_data_dir("tests/json/basic.json")
.display()
.to_string(),
|size| size * 2,
)
.await;
test_util::setup_stream_to_json_test(origin_path, |size| size * 2).await;
}
#[tokio::test]
async fn test_stream_to_csv() {
let origin_path = &find_workspace_path("/src/common/datasource/tests/csv/basic.csv")
.display()
.to_string();
// A small threshold
// Triggers a flush on each write
test_util::setup_stream_to_csv_test(
&test_util::get_data_dir("tests/csv/basic.csv")
.display()
.to_string(),
|size| size / 2,
)
.await;
test_util::setup_stream_to_csv_test(origin_path, |size| size / 2).await;
// A large threshold
// Only triggers a flush at the end
test_util::setup_stream_to_csv_test(
&test_util::get_data_dir("tests/csv/basic.csv")
.display()
.to_string(),
|size| size * 2,
)
.await;
test_util::setup_stream_to_csv_test(origin_path, |size| size * 2).await;
}

View File

@@ -66,6 +66,7 @@ mod tests {
}
}
#[cfg(not(windows))]
#[test]
fn test_parse_path_and_dir() {
let parsed = Url::from_file_path("/to/path/file").unwrap();
@@ -75,6 +76,16 @@ mod tests {
assert_eq!(parsed.path(), "/to/path/");
}
#[cfg(windows)]
#[test]
fn test_parse_path_and_dir() {
let parsed = Url::from_file_path("C:\\to\\path\\file").unwrap();
assert_eq!(parsed.path(), "/C:/to/path/file");
let parsed = Url::from_directory_path("C:\\to\\path\\").unwrap();
assert_eq!(parsed.path(), "/C:/to/path/");
}
#[test]
fn test_find_dir_and_filename() {
struct Test<'a> {

View File

@@ -8,15 +8,15 @@ license.workspace = true
proc-macro = true
[dependencies]
common-telemetry = { path = "../telemetry" }
backtrace = "0.3"
common-telemetry = { workspace = true }
proc-macro2 = "1.0.66"
quote = "1.0"
syn = "1.0"
proc-macro2 = "1.0"
[dev-dependencies]
arc-swap = "1.0"
common-query = { path = "../query" }
datatypes = { path = "../../datatypes" }
common-query = { workspace = true }
datatypes = { workspace = true }
snafu.workspace = true
static_assertions = "1.1.0"

View File

@@ -146,7 +146,9 @@ pub fn print_caller(args: TokenStream, input: TokenStream) -> TokenStream {
.expect("Expected an ident!")
.to_string();
if ident == "depth" {
let Lit::Int(i) = &name_value.lit else { panic!("Expected 'depth' to be a valid int!") };
let Lit::Int(i) = &name_value.lit else {
panic!("Expected 'depth' to be a valid int!")
};
depth = i.base10_parse::<usize>().expect("Invalid 'depth' value");
break;
}

View File

@@ -7,12 +7,12 @@ license.workspace = true
[dependencies]
arc-swap = "1.0"
chrono-tz = "0.6"
common-error = { path = "../error" }
common-function-macro = { path = "../function-macro" }
common-query = { path = "../query" }
common-time = { path = "../time" }
common-error = { workspace = true }
common-function-macro = { workspace = true }
common-query = { workspace = true }
common-time = { workspace = true }
datafusion.workspace = true
datatypes = { path = "../../datatypes" }
datatypes = { workspace = true }
libc = "0.2"
num = "0.4"
num-traits = "0.2"

View File

@@ -89,7 +89,7 @@ mod tests {
#[test]
fn test_function_registry() {
let registry = FunctionRegistry::default();
let func = Arc::new(TestAndFunction::default());
let func = Arc::new(TestAndFunction);
assert!(registry.get_function("test_and").is_none());
assert!(registry.functions().is_empty());

View File

@@ -15,18 +15,64 @@
mod pow;
mod rate;
use std::fmt;
use std::sync::Arc;
use common_query::error::{GeneralDataFusionSnafu, Result};
use common_query::prelude::Signature;
use datafusion::error::DataFusionError;
use datafusion::logical_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
use datatypes::vectors::VectorRef;
pub use pow::PowFunction;
pub use rate::RateFunction;
use snafu::ResultExt;
use super::function::FunctionContext;
use super::Function;
use crate::scalars::function_registry::FunctionRegistry;
pub(crate) struct MathFunction;
impl MathFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(PowFunction::default()));
registry.register(Arc::new(RateFunction::default()))
registry.register(Arc::new(PowFunction));
registry.register(Arc::new(RateFunction));
registry.register(Arc::new(RangeFunction))
}
}
/// `RangeFunction` will never be used as a normal function;
/// it exists only so that DataFusion can generate a logical plan for `RangeSelect`.
#[derive(Clone, Debug, Default)]
struct RangeFunction;
impl fmt::Display for RangeFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "RANGE_FN")
}
}
impl Function for RangeFunction {
fn name(&self) -> &str {
"range_fn"
}
// range_fn will never be used; return_type can be an arbitrary value and is not important
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::float64_datatype())
}
/// `range_fn` will never be used. As long as a legal signature is returned, its specific content does not matter.
/// In fact, the arguments taken by `range_fn` are very complicated and are difficult to describe with a `Signature`.
fn signature(&self) -> Signature {
Signature::any(0, Volatility::Immutable)
}
fn eval(&self, _func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
Err(DataFusionError::Internal(
"range_fn just a empty function used in range select, It should not be eval!".into(),
))
.context(GeneralDataFusionSnafu)
}
}
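
For context, a hedged test-style sketch of the placeholder behavior described above; the module and test names are illustrative, it assumes the registry keys functions by their name(), and it is not part of this change:

#[cfg(test)]
mod range_fn_sketch {
    use super::*;

    #[test]
    fn range_fn_is_a_planning_placeholder() {
        // Registering the math functions also registers `range_fn`.
        let registry = FunctionRegistry::default();
        MathFunction::register(&registry);

        // `range_fn` exists only so DataFusion can plan RangeSelect queries;
        // evaluating it always returns an error by design.
        let range_fn = registry.get_function("range_fn").unwrap();
        assert!(range_fn.eval(FunctionContext::default(), &[]).is_err());
    }
}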

View File

@@ -85,7 +85,7 @@ mod tests {
use super::*;
#[test]
fn test_pow_function() {
let pow = PowFunction::default();
let pow = PowFunction;
assert_eq!("pow", pow.name());
assert_eq!(

View File

@@ -80,7 +80,7 @@ mod tests {
use super::*;
#[test]
fn test_rate_function() {
let rate = RateFunction::default();
let rate = RateFunction;
assert_eq!("prom_rate", rate.name());
assert_eq!(
ConcreteDataType::float64_datatype(),

View File

@@ -25,6 +25,6 @@ pub(crate) struct NumpyFunction;
impl NumpyFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(ClipFunction::default()));
registry.register(Arc::new(ClipFunction));
}
}

View File

@@ -156,7 +156,7 @@ mod tests {
#[test]
fn test_clip_signature() {
let clip = ClipFunction::default();
let clip = ClipFunction;
assert_eq!("clip", clip.name());
assert_eq!(
@@ -202,8 +202,6 @@ mod tests {
#[test]
fn test_clip_fn_signed() {
let clip = ClipFunction::default();
// eval with signed integers
let args: Vec<VectorRef> = vec![
Arc::new(Int32Vector::from_values(0..10)),
@@ -217,7 +215,9 @@ mod tests {
)),
];
let vector = clip.eval(FunctionContext::default(), &args).unwrap();
let vector = ClipFunction
.eval(FunctionContext::default(), &args)
.unwrap();
assert_eq!(10, vector.len());
// clip([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 3, 6) = [3, 3, 3, 3, 4, 5, 6, 6, 6, 6]
@@ -234,8 +234,6 @@ mod tests {
#[test]
fn test_clip_fn_unsigned() {
let clip = ClipFunction::default();
// eval with unsigned integers
let args: Vec<VectorRef> = vec![
Arc::new(UInt8Vector::from_values(0..10)),
@@ -249,7 +247,9 @@ mod tests {
)),
];
let vector = clip.eval(FunctionContext::default(), &args).unwrap();
let vector = ClipFunction
.eval(FunctionContext::default(), &args)
.unwrap();
assert_eq!(10, vector.len());
// clip([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 3, 6) = [3, 3, 3, 3, 4, 5, 6, 6, 6, 6]
@@ -266,8 +266,6 @@ mod tests {
#[test]
fn test_clip_fn_float() {
let clip = ClipFunction::default();
// eval with floats
let args: Vec<VectorRef> = vec![
Arc::new(Int8Vector::from_values(0..10)),
@@ -281,7 +279,9 @@ mod tests {
)),
];
let vector = clip.eval(FunctionContext::default(), &args).unwrap();
let vector = ClipFunction
.eval(FunctionContext::default(), &args)
.unwrap();
assert_eq!(10, vector.len());
// clip([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 3, 6) = [3, 3, 3, 3, 4, 5, 6, 6, 6, 6]

View File

@@ -291,7 +291,7 @@ mod tests {
];
let vector = interp(&args).unwrap();
assert_eq!(4, vector.len());
let res = vec![3.0, 3.0, 2.5, 0.0];
let res = [3.0, 3.0, 2.5, 0.0];
for (i, item) in res.iter().enumerate().take(vector.len()) {
assert!(matches!(vector.get(i),Value::Float64(v) if v==*item));
}
@@ -305,7 +305,7 @@ mod tests {
let left = vec![-1];
let right = vec![2];
let expect = vec![-1.0, 3.0, 2.5, 2.0, 0.0, 2.0];
let expect = [-1.0, 3.0, 2.5, 2.0, 0.0, 2.0];
let args: Vec<VectorRef> = vec![
Arc::new(Float64Vector::from_vec(x)),

View File

@@ -22,6 +22,6 @@ pub(crate) struct TimestampFunction;
impl TimestampFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(ToUnixtimeFunction::default()));
registry.register(Arc::new(ToUnixtimeFunction));
}
}

View File

@@ -162,7 +162,7 @@ mod tests {
#[test]
fn test_string_to_unixtime() {
let f = ToUnixtimeFunction::default();
let f = ToUnixtimeFunction;
assert_eq!("to_unixtime", f.name());
assert_eq!(
ConcreteDataType::int64_datatype(),
@@ -190,7 +190,7 @@ mod tests {
Some("2022-06-30T23:59:60Z"),
Some("invalid_time_stamp"),
];
let results = vec![Some(1677652502), None, Some(1656633600), None];
let results = [Some(1677652502), None, Some(1656633600), None];
let args: Vec<VectorRef> = vec![Arc::new(StringVector::from(times.clone()))];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
@@ -211,7 +211,7 @@ mod tests {
#[test]
fn test_int_to_unixtime() {
let f = ToUnixtimeFunction::default();
let f = ToUnixtimeFunction;
assert_eq!("to_unixtime", f.name());
assert_eq!(
ConcreteDataType::int64_datatype(),
@@ -234,7 +234,7 @@ mod tests {
));
let times = vec![Some(3_i64), None, Some(5_i64), None];
let results = vec![Some(3), None, Some(5), None];
let results = [Some(3), None, Some(5), None];
let args: Vec<VectorRef> = vec![Arc::new(Int64Vector::from(times.clone()))];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
@@ -255,7 +255,7 @@ mod tests {
#[test]
fn test_timestamp_to_unixtime() {
let f = ToUnixtimeFunction::default();
let f = ToUnixtimeFunction;
assert_eq!("to_unixtime", f.name());
assert_eq!(
ConcreteDataType::int64_datatype(),
@@ -283,7 +283,7 @@ mod tests {
Some(TimestampSecond::new(42)),
None,
];
let results = vec![Some(123), None, Some(42), None];
let results = [Some(123), None, Some(42), None];
let ts_vector: TimestampSecondVector = build_vector_from_slice(&times);
let args: Vec<VectorRef> = vec![Arc::new(ts_vector)];
let vector = f.eval(FunctionContext::default(), &args).unwrap();

View File

@@ -77,7 +77,7 @@ mod tests {
#[test]
fn test_create_udf() {
let f = Arc::new(TestAndFunction::default());
let f = Arc::new(TestAndFunction);
let args: Vec<VectorRef> = vec![
Arc::new(ConstantVector::new(

View File

@@ -0,0 +1,27 @@
[package]
name = "common-greptimedb-telemetry"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
async-trait.workspace = true
common-error = { workspace = true }
common-runtime = { workspace = true }
common-telemetry = { workspace = true }
reqwest = { version = "0.11", features = [
"json",
"rustls-tls",
], default-features = false }
serde.workspace = true
serde_json.workspace = true
tokio.workspace = true
uuid.workspace = true
[dev-dependencies]
common-test-util = { workspace = true }
hyper = { version = "0.14", features = ["full"] }
tempfile.workspace = true
[build-dependencies]
common-version = { workspace = true }

View File

@@ -0,0 +1,17 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
fn main() {
common_version::setup_git_versions();
}

View File

@@ -0,0 +1,417 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::env;
use std::io::ErrorKind;
use std::path::{Path, PathBuf};
use std::time::Duration;
use common_runtime::error::{Error, Result};
use common_runtime::{BoxedTaskFunction, RepeatedTask, Runtime, TaskFunction};
use common_telemetry::{debug, info};
use reqwest::{Client, Response};
use serde::{Deserialize, Serialize};
/// The URL to report telemetry data.
pub const TELEMETRY_URL: &str = "https://api.greptime.cloud/db/otel/statistics";
/// The local installation uuid cache file
const UUID_FILE_NAME: &str = ".greptimedb-telemetry-uuid";
/// The default interval of reporting telemetry data to greptime cloud
pub static TELEMETRY_INTERVAL: Duration = Duration::from_secs(60 * 30);
/// The default connect timeout to greptime cloud.
const GREPTIMEDB_TELEMETRY_CLIENT_CONNECT_TIMEOUT: Duration = Duration::from_secs(10);
/// The default request timeout to greptime cloud.
const GREPTIMEDB_TELEMETRY_CLIENT_REQUEST_TIMEOUT: Duration = Duration::from_secs(10);
pub enum GreptimeDBTelemetryTask {
Enable(RepeatedTask<Error>),
Disable,
}
impl GreptimeDBTelemetryTask {
pub fn enable(interval: Duration, task_fn: BoxedTaskFunction<Error>) -> Self {
GreptimeDBTelemetryTask::Enable(RepeatedTask::new(interval, task_fn))
}
pub fn disable() -> Self {
GreptimeDBTelemetryTask::Disable
}
pub fn start(&self, runtime: Runtime) -> Result<()> {
print_anonymous_usage_data_disclaimer();
match self {
GreptimeDBTelemetryTask::Enable(task) => task.start(runtime),
GreptimeDBTelemetryTask::Disable => Ok(()),
}
}
pub async fn stop(&self) -> Result<()> {
match self {
GreptimeDBTelemetryTask::Enable(task) => task.stop().await,
GreptimeDBTelemetryTask::Disable => Ok(()),
}
}
}
/// Telemetry data to report
#[derive(Serialize, Deserialize, Debug)]
struct StatisticData {
/// Operating system name, such as `linux`, `windows` etc.
pub os: String,
/// The greptimedb version
pub version: String,
/// The architecture of the CPU, such as `x86`, `x86_64` etc.
pub arch: String,
/// The running mode, `standalone` or `distributed`.
pub mode: Mode,
/// The git commit revision of greptimedb
pub git_commit: String,
/// The number of nodes
pub nodes: Option<i32>,
/// The local installation uuid
pub uuid: String,
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Mode {
Distributed,
Standalone,
}
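// Illustrative only (not part of this change): `example_payload` is a hypothetical
// helper showing what one reported record serializes to. With
// `rename_all = "lowercase"` on `Mode`, the JSON is roughly
// {"os":"linux","version":"x.y.z","arch":"x86_64","mode":"standalone",
//  "git_commit":"<hash>","nodes":1,"uuid":"<uuid-v4>"}; every value below is a
// placeholder.
#[allow(dead_code)]
fn example_payload() -> String {
    serde_json::to_string(&StatisticData {
        os: "linux".to_string(),
        version: "x.y.z".to_string(),
        arch: "x86_64".to_string(),
        mode: Mode::Standalone,
        git_commit: "<hash>".to_string(),
        nodes: Some(1),
        uuid: "<uuid-v4>".to_string(),
    })
    .expect("StatisticData is serializable")
}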
#[async_trait::async_trait]
pub trait Collector {
fn get_version(&self) -> String {
env!("CARGO_PKG_VERSION").to_string()
}
fn get_git_hash(&self) -> String {
env!("GIT_COMMIT").to_string()
}
fn get_os(&self) -> String {
env::consts::OS.to_string()
}
fn get_arch(&self) -> String {
env::consts::ARCH.to_string()
}
fn get_mode(&self) -> Mode;
fn get_retry(&self) -> i32;
fn inc_retry(&mut self);
fn set_uuid_cache(&mut self, uuid: String);
fn get_uuid_cache(&self) -> Option<String>;
async fn get_nodes(&self) -> Option<i32>;
fn get_uuid(&mut self, working_home: &Option<String>) -> Option<String> {
match self.get_uuid_cache() {
Some(uuid) => Some(uuid),
None => {
if self.get_retry() > 3 {
return None;
}
match default_get_uuid(working_home) {
Some(uuid) => {
self.set_uuid_cache(uuid.clone());
Some(uuid)
}
None => {
self.inc_retry();
None
}
}
}
}
}
}
fn print_anonymous_usage_data_disclaimer() {
info!("Attention: GreptimeDB now collects anonymous usage data to help improve its roadmap and prioritize features.");
info!(
"To learn more about this anonymous program and how to deactivate it if you don't want to participate, please visit the following URL: ");
info!("https://docs.greptime.com/reference/telemetry");
}
pub fn default_get_uuid(working_home: &Option<String>) -> Option<String> {
let temp_dir = env::temp_dir();
let mut path = PathBuf::new();
path.push(
working_home
.as_ref()
.map(Path::new)
.unwrap_or_else(|| temp_dir.as_path()),
);
path.push(UUID_FILE_NAME);
let path = path.as_path();
match std::fs::read(path) {
Ok(bytes) => Some(String::from_utf8_lossy(&bytes).to_string()),
Err(e) => {
if e.kind() == ErrorKind::NotFound {
let uuid = uuid::Uuid::new_v4().to_string();
let _ = std::fs::write(path, uuid.as_bytes());
Some(uuid)
} else {
None
}
}
}
}
/// Report version info to GreptimeDB.
/// We do not collect any identity-sensitive information.
/// This task is scheduled to run every 30 minutes.
/// The task is disabled by default. It can be enabled by setting the build feature `greptimedb-telemetry`.
/// `Collector` gathers the version info and can be implemented by different components.
/// `client` is used to send the HTTP request to the telemetry endpoint.
/// `telemetry_url` is the URL that telemetry data is reported to.
pub struct GreptimeDBTelemetry {
statistics: Box<dyn Collector + Send + Sync>,
client: Option<Client>,
working_home: Option<String>,
telemetry_url: &'static str,
}
#[async_trait::async_trait]
impl TaskFunction<Error> for GreptimeDBTelemetry {
fn name(&self) -> &str {
"Greptimedb-telemetry-task"
}
async fn call(&mut self) -> Result<()> {
self.report_telemetry_info().await;
Ok(())
}
}
impl GreptimeDBTelemetry {
pub fn new(working_home: Option<String>, statistics: Box<dyn Collector + Send + Sync>) -> Self {
let client = Client::builder()
.connect_timeout(GREPTIMEDB_TELEMETRY_CLIENT_CONNECT_TIMEOUT)
.timeout(GREPTIMEDB_TELEMETRY_CLIENT_REQUEST_TIMEOUT)
.build();
Self {
working_home,
statistics,
client: client.ok(),
telemetry_url: TELEMETRY_URL,
}
}
pub async fn report_telemetry_info(&mut self) -> Option<Response> {
match self.statistics.get_uuid(&self.working_home) {
Some(uuid) => {
let data = StatisticData {
os: self.statistics.get_os(),
version: self.statistics.get_version(),
git_commit: self.statistics.get_git_hash(),
arch: self.statistics.get_arch(),
mode: self.statistics.get_mode(),
nodes: self.statistics.get_nodes().await,
uuid,
};
if let Some(client) = self.client.as_ref() {
info!("reporting greptimedb version: {:?}", data);
let result = client.post(self.telemetry_url).json(&data).send().await;
debug!("report version result: {:?}", result);
result.ok()
} else {
None
}
}
None => None,
}
}
}
#[cfg(test)]
mod tests {
use std::convert::Infallible;
use std::env;
use std::sync::atomic::AtomicUsize;
use std::time::Duration;
use common_test_util::ports;
use hyper::service::{make_service_fn, service_fn};
use hyper::Server;
use reqwest::Client;
use tokio::spawn;
use crate::{default_get_uuid, Collector, GreptimeDBTelemetry, Mode, StatisticData};
static COUNT: AtomicUsize = std::sync::atomic::AtomicUsize::new(0);
async fn echo(req: hyper::Request<hyper::Body>) -> hyper::Result<hyper::Response<hyper::Body>> {
let path = req.uri().path();
if path == "/req-cnt" {
let body = hyper::Body::from(format!(
"{}",
COUNT.load(std::sync::atomic::Ordering::SeqCst)
));
Ok(hyper::Response::new(body))
} else {
COUNT.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
Ok(hyper::Response::new(req.into_body()))
}
}
#[tokio::test]
async fn test_greptimedb_telemetry() {
let (tx, rx) = tokio::sync::oneshot::channel::<()>();
let port: u16 = ports::get_port() as u16;
spawn(async move {
let make_svc = make_service_fn(|_conn| {
// This is the `Service` that will handle the connection.
// `service_fn` is a helper to convert a function that
// returns a Response into a `Service`.
async { Ok::<_, Infallible>(service_fn(echo)) }
});
let addr = ([127, 0, 0, 1], port).into();
let server = Server::bind(&addr).serve(make_svc);
let graceful = server.with_graceful_shutdown(async {
rx.await.ok();
});
let _ = graceful.await;
Ok::<_, Infallible>(())
});
struct TestStatistic;
struct FailedStatistic;
#[async_trait::async_trait]
impl Collector for TestStatistic {
fn get_mode(&self) -> Mode {
Mode::Standalone
}
async fn get_nodes(&self) -> Option<i32> {
Some(1)
}
fn get_retry(&self) -> i32 {
unimplemented!()
}
fn inc_retry(&mut self) {
unimplemented!()
}
fn set_uuid_cache(&mut self, _: String) {
unimplemented!()
}
fn get_uuid_cache(&self) -> Option<String> {
unimplemented!()
}
fn get_uuid(&mut self, _working_home: &Option<String>) -> Option<String> {
Some("test".to_string())
}
}
#[async_trait::async_trait]
impl Collector for FailedStatistic {
fn get_mode(&self) -> Mode {
Mode::Standalone
}
async fn get_nodes(&self) -> Option<i32> {
None
}
fn get_retry(&self) -> i32 {
unimplemented!()
}
fn inc_retry(&mut self) {
unimplemented!()
}
fn set_uuid_cache(&mut self, _: String) {
unimplemented!()
}
fn get_uuid_cache(&self) -> Option<String> {
unimplemented!()
}
fn get_uuid(&mut self, _working_home: &Option<String>) -> Option<String> {
None
}
}
let working_home_temp = tempfile::Builder::new()
.prefix("greptimedb_telemetry")
.tempdir()
.unwrap();
let working_home = working_home_temp.path().to_str().unwrap().to_string();
let test_statistic = Box::new(TestStatistic);
let mut test_report = GreptimeDBTelemetry::new(Some(working_home.clone()), test_statistic);
let url = Box::leak(format!("{}:{}", "http://localhost", port).into_boxed_str());
test_report.telemetry_url = url;
let response = test_report.report_telemetry_info().await.unwrap();
let body = response.json::<StatisticData>().await.unwrap();
assert_eq!(env::consts::ARCH, body.arch);
assert_eq!(env::consts::OS, body.os);
assert_eq!(env!("CARGO_PKG_VERSION"), body.version);
assert_eq!(env!("GIT_COMMIT"), body.git_commit);
assert_eq!(Mode::Standalone, body.mode);
assert_eq!(1, body.nodes.unwrap());
let failed_statistic = Box::new(FailedStatistic);
let mut failed_report = GreptimeDBTelemetry::new(Some(working_home), failed_statistic);
failed_report.telemetry_url = url;
let response = failed_report.report_telemetry_info().await;
assert!(response.is_none());
let client = Client::builder()
.connect_timeout(Duration::from_secs(3))
.timeout(Duration::from_secs(3))
.build()
.unwrap();
let cnt_url = format!("{}/req-cnt", url);
let response = client.get(cnt_url).send().await.unwrap();
let body = response.text().await.unwrap();
assert_eq!("1", body);
tx.send(()).unwrap();
}
#[test]
fn test_get_uuid() {
let working_home_temp = tempfile::Builder::new()
.prefix("greptimedb_telemetry")
.tempdir()
.unwrap();
let working_home = working_home_temp.path().to_str().unwrap().to_string();
let uuid = default_get_uuid(&Some(working_home.clone()));
assert!(uuid.is_some());
assert_eq!(uuid, default_get_uuid(&Some(working_home.clone())));
assert_eq!(uuid, default_get_uuid(&Some(working_home.clone())));
}
}
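
To show how the pieces above fit together, here is a hedged wiring sketch; ExampleCollector, schedule_telemetry, and the caller-supplied Runtime are illustrative assumptions rather than code from this change, the items and imports of this module are assumed to be in scope, and BoxedTaskFunction is assumed to accept any boxed TaskFunction<Error> implementation:

// Hypothetical, component-specific collector: caches the installation uuid and
// counts lookup retries, as expected by the default `Collector::get_uuid`.
struct ExampleCollector {
    retry: i32,
    uuid: Option<String>,
}

#[async_trait::async_trait]
impl Collector for ExampleCollector {
    fn get_mode(&self) -> Mode {
        Mode::Standalone
    }
    async fn get_nodes(&self) -> Option<i32> {
        Some(1)
    }
    fn get_retry(&self) -> i32 {
        self.retry
    }
    fn inc_retry(&mut self) {
        self.retry += 1;
    }
    fn set_uuid_cache(&mut self, uuid: String) {
        self.uuid = Some(uuid);
    }
    fn get_uuid_cache(&self) -> Option<String> {
        self.uuid.clone()
    }
}

// Builds the reporter and schedules it on the given runtime at the default
// 30-minute interval; a deployment with telemetry turned off would construct
// `GreptimeDBTelemetryTask::disable()` instead, so the same call site becomes a no-op.
fn schedule_telemetry(
    working_home: Option<String>,
    runtime: Runtime,
) -> Result<GreptimeDBTelemetryTask> {
    let collector = Box::new(ExampleCollector { retry: 0, uuid: None });
    let reporter = GreptimeDBTelemetry::new(working_home, collector);
    let task = GreptimeDBTelemetryTask::enable(TELEMETRY_INTERVAL, Box::new(reporter));
    task.start(runtime)?;
    // The handle is returned so the caller can keep it alive and later
    // call `task.stop().await` during shutdown.
    Ok(task)
}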

View File

@@ -5,18 +5,17 @@ edition.workspace = true
license.workspace = true
[dependencies]
api = { path = "../../api" }
api = { workspace = true }
async-trait.workspace = true
common-base = { path = "../base" }
common-catalog = { path = "../catalog" }
common-error = { path = "../error" }
common-grpc = { path = "../grpc" }
common-query = { path = "../query" }
common-telemetry = { path = "../telemetry" }
common-time = { path = "../time" }
datatypes = { path = "../../datatypes" }
common-base = { workspace = true }
common-catalog = { workspace = true }
common-error = { workspace = true }
common-query = { workspace = true }
common-telemetry = { workspace = true }
common-time = { workspace = true }
datatypes = { workspace = true }
snafu = { version = "0.7", features = ["backtraces"] }
table = { path = "../../table" }
table = { workspace = true }
[dev-dependencies]
paste = "1.0"

View File

@@ -16,24 +16,27 @@ use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
use api::v1::column::{SemanticType, Values};
use api::v1::column::Values;
use api::v1::{
AddColumn, AddColumns, Column, ColumnDataType, ColumnDef, CreateTableExpr,
InsertRequest as GrpcInsertRequest,
InsertRequest as GrpcInsertRequest, SemanticType,
};
use common_base::BitVec;
use common_time::time::Time;
use common_time::timestamp::Timestamp;
use common_time::{Date, DateTime};
use common_time::{Date, DateTime, Interval};
use datatypes::data_type::{ConcreteDataType, DataType};
use datatypes::prelude::{ValueRef, VectorRef};
use datatypes::scalars::ScalarVector;
use datatypes::schema::SchemaRef;
use datatypes::types::{Int16Type, Int8Type, TimeType, TimestampType, UInt16Type, UInt8Type};
use datatypes::types::{
Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
};
use datatypes::value::Value;
use datatypes::vectors::{
BinaryVector, BooleanVector, DateTimeVector, DateVector, Float32Vector, Float64Vector,
Int32Vector, Int64Vector, PrimitiveVector, StringVector, TimeMicrosecondVector,
Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
IntervalYearMonthVector, PrimitiveVector, StringVector, TimeMicrosecondVector,
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
UInt64Vector,
@@ -216,6 +219,25 @@ fn collect_column_values(column_datatype: ColumnDataType, values: &Values) -> Ve
Time::new_nanosecond(*v)
))
}
ColumnDataType::IntervalYearMonth => {
collect_values!(values.interval_year_month_values, |v| {
ValueRef::Interval(Interval::from_i32(*v))
})
}
ColumnDataType::IntervalDayTime => {
collect_values!(values.interval_day_time_values, |v| {
ValueRef::Interval(Interval::from_i64(*v))
})
}
ColumnDataType::IntervalMonthDayNano => {
collect_values!(values.interval_month_day_nano_values, |v| {
ValueRef::Interval(Interval::from_month_day_nano(
v.months,
v.days,
v.nanoseconds,
))
})
}
}
}
@@ -424,6 +446,21 @@ fn values_to_vector(data_type: &ConcreteDataType, values: Values) -> VectorRef {
)),
},
ConcreteDataType::Interval(unit) => match unit {
IntervalType::YearMonth(_) => Arc::new(IntervalYearMonthVector::from_vec(
values.interval_year_month_values,
)),
IntervalType::DayTime(_) => Arc::new(IntervalDayTimeVector::from_vec(
values.interval_day_time_values,
)),
IntervalType::MonthDayNano(_) => {
Arc::new(IntervalMonthDayNanoVector::from_iter_values(
values.interval_month_day_nano_values.iter().map(|x| {
Interval::from_month_day_nano(x.months, x.days, x.nanoseconds).to_i128()
}),
))
}
},
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
unreachable!()
}
@@ -553,6 +590,27 @@ fn convert_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> {
.map(|v| Value::Time(Time::new_nanosecond(v)))
.collect(),
ConcreteDataType::Interval(IntervalType::YearMonth(_)) => values
.interval_year_month_values
.into_iter()
.map(|v| Value::Interval(Interval::from_i32(v)))
.collect(),
ConcreteDataType::Interval(IntervalType::DayTime(_)) => values
.interval_day_time_values
.into_iter()
.map(|v| Value::Interval(Interval::from_i64(v)))
.collect(),
ConcreteDataType::Interval(IntervalType::MonthDayNano(_)) => values
.interval_month_day_nano_values
.into_iter()
.map(|v| {
Value::Interval(Interval::from_month_day_nano(
v.months,
v.days,
v.nanoseconds,
))
})
.collect(),
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
unreachable!()
}
@@ -569,16 +627,17 @@ mod tests {
use std::{assert_eq, vec};
use api::helper::ColumnDataTypeWrapper;
use api::v1::column::{self, SemanticType, Values};
use api::v1::{Column, ColumnDataType};
use api::v1::column::Values;
use api::v1::{Column, ColumnDataType, IntervalMonthDayNano, SemanticType};
use common_base::BitVec;
use common_catalog::consts::MITO_ENGINE;
use common_time::interval::IntervalUnit;
use common_time::timestamp::{TimeUnit, Timestamp};
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, SchemaBuilder};
use datatypes::types::{
TimeMillisecondType, TimeSecondType, TimeType, TimestampMillisecondType,
TimestampSecondType, TimestampType,
IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType, TimeMillisecondType,
TimeSecondType, TimeType, TimestampMillisecondType, TimestampSecondType, TimestampType,
};
use datatypes::value::Value;
use paste::paste;
@@ -636,8 +695,8 @@ mod tests {
);
let column_defs = create_expr.column_defs;
assert_eq!(column_defs[4].name, create_expr.time_index);
assert_eq!(5, column_defs.len());
assert_eq!(column_defs[5].name, create_expr.time_index);
assert_eq!(6, column_defs.len());
assert_eq!(
ConcreteDataType::string_datatype(),
@@ -695,6 +754,20 @@ mod tests {
)
);
assert_eq!(
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
ConcreteDataType::from(
ColumnDataTypeWrapper::try_new(
column_defs
.iter()
.find(|c| c.name == "interval")
.unwrap()
.datatype
)
.unwrap()
)
);
assert_eq!(
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::from(
@@ -728,7 +801,7 @@ mod tests {
let add_columns = find_new_columns(&schema, &insert_batch.0).unwrap().unwrap();
assert_eq!(3, add_columns.add_columns.len());
assert_eq!(4, add_columns.add_columns.len());
let host_column = &add_columns.add_columns[0];
assert!(host_column.is_key);
@@ -761,6 +834,19 @@ mod tests {
.unwrap()
)
);
let interval_column = &add_columns.add_columns[3];
assert!(!interval_column.is_key);
assert_eq!(
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
ConcreteDataType::from(
ColumnDataTypeWrapper::try_new(
interval_column.column_def.as_ref().unwrap().datatype
)
.unwrap()
)
);
}
#[test]
@@ -1005,6 +1091,70 @@ mod tests {
assert_eq!(expect, actual);
}
#[test]
fn test_convert_interval_values() {
// year_month
let actual = convert_values(
&ConcreteDataType::Interval(IntervalType::YearMonth(IntervalYearMonthType)),
Values {
interval_year_month_values: vec![1_i32, 2_i32, 3_i32],
..Default::default()
},
);
let expect = vec![
Value::Interval(Interval::from_year_month(1_i32)),
Value::Interval(Interval::from_year_month(2_i32)),
Value::Interval(Interval::from_year_month(3_i32)),
];
assert_eq!(expect, actual);
// day_time
let actual = convert_values(
&ConcreteDataType::Interval(IntervalType::DayTime(IntervalDayTimeType)),
Values {
interval_day_time_values: vec![1_i64, 2_i64, 3_i64],
..Default::default()
},
);
let expect = vec![
Value::Interval(Interval::from_i64(1_i64)),
Value::Interval(Interval::from_i64(2_i64)),
Value::Interval(Interval::from_i64(3_i64)),
];
assert_eq!(expect, actual);
// month_day_nano
let actual = convert_values(
&ConcreteDataType::Interval(IntervalType::MonthDayNano(IntervalMonthDayNanoType)),
Values {
interval_month_day_nano_values: vec![
IntervalMonthDayNano {
months: 1,
days: 2,
nanoseconds: 3,
},
IntervalMonthDayNano {
months: 5,
days: 6,
nanoseconds: 7,
},
IntervalMonthDayNano {
months: 9,
days: 10,
nanoseconds: 11,
},
],
..Default::default()
},
);
let expect = vec![
Value::Interval(Interval::from_month_day_nano(1, 2, 3)),
Value::Interval(Interval::from_month_day_nano(5, 6, 7)),
Value::Interval(Interval::from_month_day_nano(9, 10, 11)),
];
assert_eq!(expect, actual);
}
#[test]
fn test_is_null() {
let null_mask = BitVec::from_slice(&[0b0000_0001, 0b0000_1000]);
@@ -1022,7 +1172,7 @@ mod tests {
fn mock_insert_batch() -> (Vec<Column>, u32) {
let row_count = 2;
let host_vals = column::Values {
let host_vals = Values {
string_values: vec!["host1".to_string(), "host2".to_string()],
..Default::default()
};
@@ -1034,7 +1184,7 @@ mod tests {
datatype: ColumnDataType::String as i32,
};
let cpu_vals = column::Values {
let cpu_vals = Values {
f64_values: vec![0.31],
..Default::default()
};
@@ -1046,7 +1196,7 @@ mod tests {
datatype: ColumnDataType::Float64 as i32,
};
let mem_vals = column::Values {
let mem_vals = Values {
f64_values: vec![0.1],
..Default::default()
};
@@ -1058,7 +1208,7 @@ mod tests {
datatype: ColumnDataType::Float64 as i32,
};
let time_vals = column::Values {
let time_vals = Values {
time_millisecond_values: vec![100, 101],
..Default::default()
};
@@ -1070,7 +1220,29 @@ mod tests {
datatype: ColumnDataType::TimeMillisecond as i32,
};
let ts_vals = column::Values {
let interval1 = IntervalMonthDayNano {
months: 1,
days: 2,
nanoseconds: 3,
};
let interval2 = IntervalMonthDayNano {
months: 4,
days: 5,
nanoseconds: 6,
};
let interval_vals = Values {
interval_month_day_nano_values: vec![interval1, interval2],
..Default::default()
};
let interval_column = Column {
column_name: "interval".to_string(),
semantic_type: SemanticType::Field as i32,
values: Some(interval_vals),
null_mask: vec![0],
datatype: ColumnDataType::IntervalMonthDayNano as i32,
};
let ts_vals = Values {
ts_millisecond_values: vec![100, 101],
..Default::default()
};
@@ -1083,7 +1255,14 @@ mod tests {
};
(
vec![host_column, cpu_column, mem_column, time_column, ts_column],
vec![
host_column,
cpu_column,
mem_column,
time_column,
interval_column,
ts_column,
],
row_count,
)
}

View File

@@ -5,21 +5,18 @@ edition.workspace = true
license.workspace = true
[dependencies]
api = { path = "../../api" }
api = { workspace = true }
arrow-flight.workspace = true
async-trait = "0.1"
backtrace = "0.3"
common-base = { path = "../base" }
common-error = { path = "../error" }
common-function-macro = { path = "../function-macro" }
common-query = { path = "../query" }
common-meta = { path = "../meta" }
common-recordbatch = { path = "../recordbatch" }
common-runtime = { path = "../runtime" }
common-telemetry = { path = "../telemetry" }
common-base = { workspace = true }
common-error = { workspace = true }
common-recordbatch = { workspace = true }
common-runtime = { workspace = true }
common-telemetry = { workspace = true }
dashmap = "5.4"
datafusion.workspace = true
datatypes = { path = "../../datatypes" }
datatypes = { workspace = true }
flatbuffers = "23.1"
futures = "0.3"
lazy_static.workspace = true

View File

@@ -17,7 +17,7 @@ use std::sync::Arc;
use api::v1::{AffectedRows, FlightMetadata};
use arrow_flight::utils::flight_data_to_arrow_batch;
use arrow_flight::{FlightData, IpcMessage, SchemaAsIpc};
use arrow_flight::{FlightData, SchemaAsIpc};
use common_base::bytes::Bytes;
use common_recordbatch::{RecordBatch, RecordBatches};
use datatypes::arrow;
@@ -25,6 +25,7 @@ use datatypes::arrow::datatypes::Schema as ArrowSchema;
use datatypes::arrow::ipc::{root_as_message, writer, MessageHeader};
use datatypes::schema::{Schema, SchemaRef};
use flatbuffers::FlatBufferBuilder;
use prost::bytes::Bytes as ProstBytes;
use prost::Message;
use snafu::{OptionExt, ResultExt};
@@ -86,12 +87,12 @@ impl FlightEncoder {
affected_rows: Some(AffectedRows { value: rows as _ }),
}
.encode_to_vec();
FlightData::new(
None,
IpcMessage(build_none_flight_msg().into()),
metadata,
vec![],
)
FlightData {
flight_descriptor: None,
data_header: build_none_flight_msg().into(),
app_metadata: metadata.into(),
data_body: ProstBytes::default(),
}
}
}
}
@@ -248,7 +249,9 @@ mod test {
)
.unwrap();
assert_eq!(flight_data.len(), 3);
let [d1, d2, d3] = flight_data.as_slice() else { unreachable!() };
let [d1, d2, d3] = flight_data.as_slice() else {
unreachable!()
};
let decoder = &mut FlightDecoder::default();
assert!(decoder.schema.is_none());
@@ -262,19 +265,25 @@ mod test {
let message = decoder.try_decode(d1.clone()).unwrap();
assert!(matches!(message, FlightMessage::Schema(_)));
let FlightMessage::Schema(decoded_schema) = message else { unreachable!() };
let FlightMessage::Schema(decoded_schema) = message else {
unreachable!()
};
assert_eq!(decoded_schema, schema);
let _ = decoder.schema.as_ref().unwrap();
let message = decoder.try_decode(d2.clone()).unwrap();
assert!(matches!(message, FlightMessage::Recordbatch(_)));
let FlightMessage::Recordbatch(actual_batch) = message else { unreachable!() };
let FlightMessage::Recordbatch(actual_batch) = message else {
unreachable!()
};
assert_eq!(actual_batch, batch1);
let message = decoder.try_decode(d3.clone()).unwrap();
assert!(matches!(message, FlightMessage::Recordbatch(_)));
let FlightMessage::Recordbatch(actual_batch) = message else { unreachable!() };
let FlightMessage::Recordbatch(actual_batch) = message else {
unreachable!()
};
assert_eq!(actual_batch, batch2);
}

View File

@@ -12,12 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use api::helper::convert_i128_to_interval;
use api::v1::column::Values;
use common_base::BitVec;
use datatypes::types::{TimeType, TimestampType, WrapperType};
use datatypes::types::{IntervalType, TimeType, TimestampType, WrapperType};
use datatypes::vectors::{
BinaryVector, BooleanVector, DateTimeVector, DateVector, Float32Vector, Float64Vector,
Int16Vector, Int32Vector, Int64Vector, Int8Vector, StringVector, TimeMicrosecondVector,
Int16Vector, Int32Vector, Int64Vector, Int8Vector, IntervalDayTimeVector,
IntervalMonthDayNanoVector, IntervalYearMonthVector, StringVector, TimeMicrosecondVector,
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt16Vector,
UInt32Vector, UInt64Vector, UInt8Vector, VectorRef,
@@ -192,6 +194,24 @@ pub fn values(arrays: &[VectorRef]) -> Result<Values> {
TimeNanosecondVector,
time_nanosecond_values,
|x| { x.into_native() }
),
(
ConcreteDataType::Interval(IntervalType::YearMonth(_)),
IntervalYearMonthVector,
interval_year_month_values,
|x| { x.into_native() }
),
(
ConcreteDataType::Interval(IntervalType::DayTime(_)),
IntervalDayTimeVector,
interval_day_time_values,
|x| { x.into_native() }
),
(
ConcreteDataType::Interval(IntervalType::MonthDayNano(_)),
IntervalMonthDayNanoVector,
interval_month_day_nano_values,
|x| { convert_i128_to_interval(x.into_native()) }
)
)
}
@@ -222,6 +242,43 @@ mod tests {
assert_eq!(vec![1, 2, 3], values.time_second_values);
}
#[test]
fn test_convert_arrow_array_interval_year_month() {
let array = IntervalYearMonthVector::from(vec![Some(1), Some(2), None, Some(3)]);
let array: VectorRef = Arc::new(array);
let values = values(&[array]).unwrap();
assert_eq!(vec![1, 2, 3], values.interval_year_month_values);
}
#[test]
fn test_convert_arrow_array_interval_day_time() {
let array = IntervalDayTimeVector::from(vec![Some(1), Some(2), None, Some(3)]);
let array: VectorRef = Arc::new(array);
let values = values(&[array]).unwrap();
assert_eq!(vec![1, 2, 3], values.interval_day_time_values);
}
#[test]
fn test_convert_arrow_array_interval_month_day_nano() {
let array = IntervalMonthDayNanoVector::from(vec![Some(1), Some(2), None, Some(3)]);
let array: VectorRef = Arc::new(array);
let values = values(&[array]).unwrap();
(0..3).for_each(|i| {
assert_eq!(values.interval_month_day_nano_values[i].months, 0);
assert_eq!(values.interval_month_day_nano_values[i].days, 0);
assert_eq!(
values.interval_month_day_nano_values[i].nanoseconds,
i as i64 + 1
);
})
}
#[test]
fn test_convert_arrow_arrays_string() {
let array = StringVector::from(vec![

Some files were not shown because too many files have changed in this diff.