Compare commits

...

98 Commits

Author SHA1 Message Date
zyy17
24b880f982 ci: push image to dockerhub
Signed-off-by: zyy17 <zyylsxm@gmail.com>
2022-11-04 13:05:35 +08:00
Lei, Huang
db2b577628 feat: remote catalog (#315)
* chore: refactor dir for local catalog manager

* refactor: CatalogProvider returns Result

* refactor: SchemaProvider returns Result

* feat: add kv operations to remote catalog

* chore: refactor some code

* feat: impl catalog initialization

* feat: add register table and register system table function

* refactor: add table_info method for Table trait

* chore: add some tests

* chore: add register schema test

* chore: fix build issue after rebase onto develop

* refactor: mock to separate file

* build: failed to compile

* fix: use a container struct to bridge KvBackend and Accessor trait

* feat: upgrade opendal to 0.17

* test: add more tests

* chore: add catalog name and schema name to table info

* chore: add catalog name and schema name to table info

* chore: rebase onto develop

* refactor: common-catalog crate

* chore: refactor dir for local catalog manager

* refactor: CatalogProvider returns Result

* refactor: SchemaProvider returns Result

* feat: add kv operations to remote catalog

* chore: refactor some code

* feat: impl catalog initialization

* feat: add register table and register system table function

* refactor: add table_info method for Table trait

* chore: add some tests

* chore: add register schema test

* chore: fix build issue after rebase onto develop

* refactor: mock to separate file

* build: failed to compile

* fix: use a container struct to bridge KvBackend and Accessor trait

* feat: upgrade opendal to 0.17

* test: add more tests

* chore: add catalog name and schema name to table info

* chore: add catalog name and schema name to table info

* chore: rebase onto develop

* refactor: common-catalog crate

* refactor: remove remote catalog related files

* fix: compilation

* feat: add table version to TableKey

* feat: add node id to TableValue

* fix: some CR comments

* chore: change async fn create_expr_to_request to sync

* fix: add backtrace to errors

* fix: code style

* refactor: merge refactor/catalog-crate

* feat: table key with version

* feat: impl KvBackend for MetaClient

* fix: integrate metaclient

* fix: catalog use local table info as baseline

* fix: sync metasrv

* fix: wip

* fix: update remote catalog on register and deregister

* refactor: CatalogProvider

* refactor: CatalogManager

* fix: catalog key filtering

* fix: pass some test

* refactor: catalog iterating

* fix: CatalogManager::table also requires both catalog_name and schema_name

* chore: merge develop

* chore: merge catalog crate

* fix: adapt to recent meta-client api change

* feat: datanode lease

* feat: remote catalog (#356)

* chore: refactor dir for local catalog manager

* refactor: CatalogProvider returns Result

* refactor: SchemaProvider returns Result

* feat: add kv operations to remote catalog

* chore: refactor some code

* feat: impl catalog initialization

* feat: add register table and register system table function

* refactor: add table_info method for Table trait

* chore: add some tests

* chore: add register schema test

* chore: fix build issue after rebase onto develop

* refactor: mock to separate file

* build: failed to compile

* fix: use a container struct to bridge KvBackend and Accessor trait

* feat: upgrade opendal to 0.17

* test: add more tests

* chore: add catalog name and schema name to table info

* chore: add catalog name and schema name to table info

* chore: rebase onto develop

* refactor: common-catalog crate

* chore: refactor dir for local catalog manager

* refactor: CatalogProvider returns Result

* refactor: SchemaProvider returns Result

* feat: add kv operations to remote catalog

* chore: refactor some code

* feat: impl catalog initialization

* feat: add register table and register system table function

* refactor: add table_info method for Table trait

* chore: add some tests

* chore: add register schema test

* chore: fix build issue after rebase onto develop

* refactor: mock to separate file

* build: failed to compile

* fix: use a container struct to bridge KvBackend and Accessor trait

* feat: upgrade opendal to 0.17

* test: add more tests

* chore: add catalog name and schema name to table info

* chore: add catalog name and schema name to table info

* chore: rebase onto develop

* refactor: common-catalog crate

* refactor: remove remote catalog related files

* fix: compilation

* feat: add table version to TableKey

* feat: add node id to TableValue

* fix: some CR comments

* chore: change async fn create_expr_to_request to sync

* fix: add backtrace to errors

* fix: code style

* refactor: merge refactor/catalog-crate

* feat: table key with version

* feat: impl KvBackend for MetaClient

* fix: integrate metaclient

* fix: catalog use local table info as baseline

* fix: sync metasrv

* fix: wip

* fix: update remote catalog on register and deregister

* refactor: CatalogProvider

* refactor: CatalogManager

* fix: catalog key filtering

* fix: pass some test

* refactor: catalog iterating

* fix: CatalogManager::table also requires both catalog_name and schema_name

* chore: merge develop

* chore: merge catalog crate

* fix: adapt to recent meta-client api change

* feat: datanode heartbeat (#355)

* feat: add heartbeat task to instance

* feat: add node_id datanode opts

* fix: use real node id in heartbeat and meta client

* feat: distribute table in frontend

* test: distribute read demo

* test: distribute read demo

* test: distribute read demo

* add write spliter

* fix: node id changed to u64

* feat: datanode uses remote catalog implementation

* dist insert integrate table

* feat: specify region ids on creating table (#359)

* fix: compiling issues

* feat: datanode lease (#354)

* Some glue code about dist_insert

* fix: correctly wrap string value with quotes

* feat: create route

* feat: frontend catalog (#362)

* feat: integrate catalog to frontend

* feat: preserve partition rule on create

* fix: print tables on start

* chore: log in create route

* test: distribute read demo

* feat: support metasrv addr command line options

* feat: optimize DataNodeInstance creation (#368)

* chore: remove unnecessary changes

* chore: revert changes to src/api

* chore: revert changes to src/datanode/src/server.rs

* chore: remove opendal backend

* chore: optimize imports

* chore: revert changes to instance and region ids

* refactor: MetaKvBackend range

* fix: remove some wrap

* refactor: initiation of catalog

* fix: next range request start key

* fix: mock delete range

* refactor: simplify range response handling

Co-authored-by: jiachun <jiachun_fjc@163.com>
Co-authored-by: luofucong <luofucong@greptime.com>
Co-authored-by: fys <1113014250@qq.com>
Co-authored-by: Jiachun Feng <jiachun_feng@proton.me>
2022-11-04 11:43:31 +08:00
Yingwen
cba611b9f5 refactor: Serialize RawSchema/RawTableMeta/RawTableInfo (#382)
* refactor: Serialize Schema/TableMeta/TableInfo to raw structs

* test: Add tests for raw struct conversion

* style: Fix clippy

* refactor: SchemaBuilder::timestamp_index takes Option<usize>

So the caller can chain the timestamp_index method call even when there is no
timestamp index.
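
A tiny illustration of the builder-chaining point above; the struct is a simplified stand-in, not the real SchemaBuilder:

```rust
// Simplified stand-in for SchemaBuilder; only the chaining pattern is shown.
#[derive(Default, Debug)]
struct SchemaBuilder {
    timestamp_index: Option<usize>,
}

impl SchemaBuilder {
    // Taking Option<usize> lets callers chain this call unconditionally.
    fn timestamp_index(mut self, index: Option<usize>) -> Self {
        self.timestamp_index = index;
        self
    }
}

fn main() {
    let with_ts = SchemaBuilder::default().timestamp_index(Some(0));
    let without_ts = SchemaBuilder::default().timestamp_index(None); // chain still works
    println!("{:?} {:?}", with_ts, without_ts);
}
```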

* style(datatypes): Chains SchemaBuilder method calls
2022-11-04 11:25:17 +08:00
zyy17
6aec1b4f90 ci: add workflow of artifacts release (#389)
Signed-off-by: zyy17 <zyylsxm@gmail.com>

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2022-11-04 10:55:41 +08:00
Ruihang Xia
6d1dd5e7af fix: also run CI in develop branch (#387)
* fix: also run CI in develop branch

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add develop branch

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-11-03 18:35:30 +08:00
Jiachun Feng
e19b63f4f5 chore: meta mock test (#379)
* chore: meta mock

* chore: refactor datanode selector

* chore: create route mock test

* chore: add mock module

* chore: memory store for test

* chore: mock meta for test

* chore: ensure memory store has the same behaviour as etcd

* chore: replace tokio lock with parking_lot
2022-11-03 18:33:29 +08:00
shuiyisong
750310c648 feat: frontend start with instance param (#385)
* chore: fix conflict

* chore: remove unused import
2022-11-03 18:05:01 +08:00
Ruihang Xia
9fd2d4e8db fix: detach grpc tasks to another runtime (#376)
* fix: detach grpc tasks to another runtime

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add runtime size options

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* group an obj-req into one task

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* make nitpicking CRer happy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-11-03 17:24:15 +08:00
元波
77233c20e1 fix: remove unnecessary protocol (#386) 2022-11-03 17:14:08 +08:00
fys
1fad67cf4d feat: grpc client support multi peers (#380)
* feat: grpc client use channel manager

* cr
2022-11-03 11:55:22 +08:00
LFC
5abff7a536 feat: range columns partitioning rule (#374)
* feat: parse partition syntax in "create table"

* feat: partition rule

* fix: rebase develop

* feat: range partitioning rule

* fix: resolve PR comments

* feat: range columns partitioning rule

* refactor: remove unused codes

* fix: resolve PR comments

* fix: resolve PR comments

Co-authored-by: luofucong <luofucong@greptime.com>
2022-11-02 22:36:32 +08:00
Yingwen
6f1f697bfc feat: Implements shutdown for GrpcServer and HttpServer (#372)
* fix: Fix TestGuard being dropped before grpc test starts

* feat: Let start and shutdown takes immutable reference to self

Also implement shutdown for GrpcServer

* feat: Implement shutdown for HttpServer

* style: Fix clippy

* chore: Add name to AlreadyStarted error
2022-11-02 18:10:41 +08:00
Jiachun Feng
2d4a44414d feat: refactor for test (#375)
* chore: add set_header macro & remove some unnecessary code

* chore: channel_manager with connector
2022-11-01 17:34:54 +08:00
LFC
ea2ebc0e87 feat: range partition rule (#304)
* feat: range partitioning rule

Co-authored-by: luofucong <luofucong@greptime.com>
2022-11-01 16:09:23 +08:00
Jiachun Feng
dacfd12b8f feat: router impl (#363)
* feat: heartbeat lease & route api

* feat: batchput&cas

* chore: demo&ut

* chore: by cr

* chore: datanode selector

* chore: rename with_key_range to with_range

* chore: ut
2022-11-01 11:45:05 +08:00
Ning Sun
518b665f1e feat: Improve http sql api and attempt to add openapi docs (#361)
This patch changes the output of our http SQL API and prepares it for our SQL editor development. Changes include:

- includes aide for OAS 3.1 openapi documents, available at /v1/private/api.json
- simplified some of the http handlers' return types, using string or json directly
- created a new HttpRecordsOutput type to hide the internals of RecordBatch from the end user. It also tunes the data structure to be friendlier for applications to consume
- updated the response struct to use a code for success or a detailed error code

Residual issue #366 
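
A hedged sketch of what such a JSON envelope could look like in Rust with serde (serde and serde_json dependencies assumed); the type and field names here (JsonResponse, HttpRecordsOutput, code, error, output) are illustrative assumptions, not the actual servers crate API:

```rust
// Illustrative only: a response envelope with a `code` field instead of a bare
// success flag, and a records type that hides RecordBatch internals.
use serde::Serialize;

#[derive(Serialize)]
struct HttpRecordsOutput {
    schema: Vec<String>,               // column names only, no internal schema types
    rows: Vec<Vec<serde_json::Value>>, // row-oriented values, easy for a web UI to consume
}

#[derive(Serialize)]
struct JsonResponse {
    code: u32, // 0 on success, otherwise a detailed error code
    #[serde(skip_serializing_if = "Option::is_none")]
    error: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    output: Option<HttpRecordsOutput>,
}

fn main() {
    let resp = JsonResponse {
        code: 0,
        error: None,
        output: Some(HttpRecordsOutput {
            schema: vec!["host".into(), "cpu".into()],
            rows: vec![vec!["h1".into(), 0.5.into()]],
        }),
    };
    println!("{}", serde_json::to_string(&resp).unwrap());
}
```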

* feat: allow http post for our sql http api

* feat: update our http api and attempt to add openapi spec support

* test: correct test against new handler apis

* refactor: rename rows to records

* refactor: removed HttpResponse completely

* feat: add information to our openapi docs

* feat: add docs for sql interface response

* refactor: use struct to represent query so we can doc it via aide

* refactor: use arc wrapped api

* feat: add redoc UI support

* Update src/servers/src/http.rs

Co-authored-by: LFC <bayinamine@gmail.com>

* Update src/servers/src/http.rs

Co-authored-by: LFC <bayinamine@gmail.com>

* fix: address review comments

* test: update integration tests for new api output

* refactor: make prometheus http apis compatible with recent changes

* refactor: get schema from stream

* test: add test for recordbatch to json serialization

* test: add todo for a test to be fixed later

* Revert "test: add todo for a test to be fixed later"

This reverts commit a5a50c7afb.

* fix: Revert "refactor: get schema from stream"

This reverts commit 945b685556.

* chore: add todo for pending issue #366

* chore: remove fixed server url in openapi docs

* feat: include error_code in json response

* refactor: use code over success field in json response

Co-authored-by: LFC <bayinamine@gmail.com>
2022-10-31 16:20:03 +08:00
Ruihang Xia
e2c28fe374 feat: support data type and schema conversion (#351)
* feat: type and schema transformer

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* test schema codec

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* support projection and schema

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy warning

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* project schema

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typos

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix compile

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/common/substrait/src/df_logical.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* more document about type variations

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2022-10-31 15:16:13 +08:00
Yingwen
f4e22282a4 feat: Region supports reading data with different schema (#342)
* feat(storage): Implement skeleton of ReadResolver

ReadResolver is used to resolve difference between schemas

* feat(storage): Add user_column_end to ReadResolver

* feat(storage): Implement Batch::batch_from_parts

Used to construct Batch from parts according to the schema that user
expects to read.

* feat(storage): Compat memtable schema

* feat(storage): Compat parquet file schema

* fix(storage): ReadResolver supports projection under same schema version

Now ReadResolver takes ProjectedSchemaRef as dest schema, and checks
whether a value column is needed by the schema after projection.

* feat(storage): Check whether columns are the same column

is_source_column_readable() takes ColumnMetadata instead of ColumnSchema, and compares their column ids to check whether they are the same column.
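
A toy sketch of that check with simplified types (the real ColumnMetadata carries more fields):

```rust
// Illustrative only: two columns count as the same column when their ids match,
// even if a column with the same name was dropped and re-added later.
struct ColumnMetadata {
    id: u32,
    name: String,
}

fn is_same_column(source: &ColumnMetadata, dest: &ColumnMetadata) -> bool {
    source.id == dest.id
}

fn main() {
    let old = ColumnMetadata { id: 1, name: "value".to_string() };
    let readded = ColumnMetadata { id: 7, name: "value".to_string() };
    assert_eq!(old.name, readded.name);       // same name...
    assert!(!is_same_column(&old, &readded)); // ...but not the same column
}
```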

* refactor(storage): Use row_key_end/user_column_end in source_schema

Rename ReadResolver::is_needed to ReadResolver::is_source_needed, and remove row_key_end/user_column_end from ReadResolver, since they should be the same as source_schema's.

* chore(storage): Remove unused codes

* test(storage): Add tests for the resolver

* feat(storage): Returns error on different source and dest column names

* style(storage): Fix clippy

* refactor: Rename ReadResolver to ReadAdapter

* chore(table): Removed unused comment

* refactor: rename to is_source_column_compatible
2022-10-31 11:42:07 +08:00
dennis zhuang
0604eb7509 feat: prometheus remote write and read (#346)
* feat: scaffold for prometheus protocol handler

* feat: impl remote write and read for prometheus

* chore: make label matchers work in remote reading

* chore: case sensitive regexp matching for labels and tweak restful api

* test: prometheus test

* test: adds test for prometheus handler and http server

* fix: typo in comment

* refactor: move snappy_compress and snappy_decompress

* fix: by code review

* fix: collect_timeseries_ids

* fix: timestamp and value column's value may be null
2022-10-28 18:47:16 +08:00
Lei, Huang
81716d622e feat: timestamp column support i64 (#325)
* feat: align_bucket support i64 and timestamp values

* feat: add Int64 to timestamp

* feat: support query i64 timestamp vector

* test: fix failing tests

* refactor: simplify some code

* fix: CR comments and add insert and query test for i64 timestamp column
2022-10-28 18:39:11 +08:00
Ruihang Xia
3e8d9b421c chore: set CI timeout (#358)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-10-28 11:01:12 +08:00
fys
6d4c0ad5a3 feat: add writespliter (#345)
* Add writespliter

* Partition_rule uses a reference, not Arc
2022-10-27 10:57:34 +08:00
Jiachun Feng
00966cad69 feat: meta refactor (#339)
* feat: heartbeat handler

* chore: heartbeat handlers lock refactor

* chore: store rpc req/res wrapper

* chore: router rpc/res wrapper

* chore: const method(request_header)

* chore: rm unnecessary const fn & refactor HeartbeatHandler

* chore: refactor CreateRequest

* chore: HeartbeatAccumulator

* chore: improve router req/res convert

* fix: register race condition
2022-10-26 11:26:40 +08:00
Lei, Huang
932b30d299 refactor: catalog crate (#331)
* chore: refactor dir for local catalog manager

* refactor: CatalogProvider returns Result

* refactor: SchemaProvider returns Result

* feat: add kv operations to remote catalog

* chore: refactor some code

* feat: impl catalog initialization

* feat: add register table and register system table function

* refactor: add table_info method for Table trait

* chore: add some tests

* chore: add register schema test

* chore: fix build issue after rebase onto develop

* refactor: mock to separate file

* build: failed to compile

* fix: use a container struct to bridge KvBackend and Accessor trait

* feat: upgrade opendal to 0.17

* test: add more tests

* chore: add catalog name and schema name to table info

* chore: add catalog name and schema name to table info

* chore: rebase onto develop

* refactor: common-catalog crate

* refactor: remove remote catalog related files

* fix: compilation

* feat: add table version to TableKey

* feat: add node id to TableValue

* fix: some CR comments

* chore: change async fn create_expr_to_request to sync

* fix: add backtrace to errors

* fix: code style

* fix: CatalogManager::table also requires both catalog_name and schema_name

* chore: merge develop
2022-10-26 10:50:39 +08:00
Ruihang Xia
7fe39e9187 feat: support quering with logical plan in gRPC layer (#344)
* impl logical exec & example

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* test on upper api

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add todo to prost dep

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* sign the TODO

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-10-25 16:05:53 +08:00
LFC
2ca667cbdf refactor: make table scan return physical plan (#326)
* refactor: return PhysicalPlan in Table trait's scan method, to support partitioned execution in Frontend's distributed read

* refactor: pub use necessary DataFusion types

* refactor: replace old "PhysicalPlan" and its adapters

Co-authored-by: luofucong <luofucong@greptime.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2022-10-25 11:34:53 +08:00
Yingwen
64dac51e83 feat: Holds ColumnMetadata in StoreSchema (#333)
* chore: Update StoreSchema comment

* feat: Add metadata to ColumnSchema

* feat: Impl conversion between ColumnMetadata and ColumnSchema

We could use this feature to store the ColumnMetadata as arrow's
Schema, since the ColumnSchema could be further converted to an arrow
schema. Then we could use ColumnMetadata in StoreSchema, which contains
more information, especially the column id.

* feat(storage): Merge schema::Error to metadata::Error

To avoid cyclic dependency of two Errors

* feat(storage): Store ColumnMetadata in StoreSchema

* feat(storage): Use StoreSchemaRef to avoid cloning the whole StoreSchema struct

* test(storage): Fix test_store_schema

* feat(datatypes): Return error on duplicate meta key

* chore: Address CR comments
2022-10-25 11:06:22 +08:00
xiaomin tang
edad6f89b5 docs: Add code_of_conduct adapted from the Contributor Covenant (#340) 2022-10-24 19:04:55 +08:00
Ruihang Xia
8ab43b65ea feat: serialize/deserialize logical and execution plan via substrait (#317)
* fix: change Utf8Array indice type

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* refactor: remove unused sub-crate

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: impl for both Logical and Execution plan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* refactor: move test-util subcrate into table

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* test: table scan logical plan round trip

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* drop support of physical plan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix warnings

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* rename trait fns to encode/decode

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* address review comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-10-24 15:29:33 +08:00
Lei, Huang
6fc45e31e0 fix: put type rewrite optimizer rule at first (#337) 2022-10-24 15:05:59 +08:00
Yingwen
a457c49d99 refactor: Remove column_null_mask in MutationExtra (#314)
* refactor: Remove column_null_mask in MutationExtra

MutationExtra::column_null_mask is no longer needed as we can ensure there are no missing columns in WriteBatch.

* feat(storage): Remove MutationExtra

Just stores MutationType in the WalHeader, no longer needs MutationExtra
2022-10-24 14:53:35 +08:00
Jiachun Feng
b650656ae3 chore: refactor meta protocol (#332)
* chore: refactor channel_config

* chore: refactor grpc protocol

* feat: heartbeat streams
2022-10-21 20:30:57 +08:00
Ruihang Xia
bc9a2df9bf refactor: move test-util subcrate into table (#334)
* refactor: move test-util subcrate into table

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: clean comment

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* move MockTableEngine into test-util

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-10-21 14:39:40 +08:00
LFC
6b0c5281d4 feat: try from DataFusion's ScalarValue for our Value (#329)
* feat: try from DataFusion's ScalarValue for our Value

* Update src/datatypes/src/value.rs

Co-authored-by: Lei, Huang <6406592+v0y4g3r@users.noreply.github.com>

* fix: resolve CR comments

Co-authored-by: luofucong <luofucong@greptime.com>
Co-authored-by: Lei, Huang <6406592+v0y4g3r@users.noreply.github.com>
2022-10-20 20:22:40 +08:00
fys
fad8f442ef feat: modify proto for distribute insert (#327) 2022-10-20 12:41:15 +08:00
Lei, Huang
2d52f19662 feat: add table info (#323)
* refactor: add table_info method for Table trait

* feat: add table_info method to Table trait

* test: add more unit test

* fix: impl table_info for SystemTable

* test: fix failing test
2022-10-20 12:23:44 +08:00
LFC
d5800d0b60 feat: parse partition syntax in "create table" (#298)
* feat: parse partition syntax in "create table"

* Update src/sql/src/parsers/create_parser.rs

Co-authored-by: luofucong <luofucong@greptime.com>
Co-authored-by: Lei, Huang <6406592+v0y4g3r@users.noreply.github.com>
2022-10-20 10:43:15 +08:00
Ruihang Xia
fbea07ea83 chore: remove unused dependencies (#319)
* chore: remove unused dependencies

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: recover some dev-deps

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-10-19 14:08:54 +08:00
Yingwen
87130adf54 docs: Move contributing parts from README to CONTRIBUTING.md (#321) 2022-10-19 14:00:31 +08:00
Yingwen
c147657275 ci: Use docs instead of doc (#322) 2022-10-19 11:56:49 +08:00
Jiachun Feng
d5b34f8917 feat: metasrv (#300)
* meta: meta api&client

* meta: heartbeat server init

* feat: kv store

* chore: grpc server

* chore: meta server bootstrap

* feat: heartbeat client

* feat: route for create table

* chore: a channel pool manager

* feat: route client

* feat: store client

* chore: meta_client example

* chore: change schema

* chore: unit test & by cr

* chore: refactor meta client

* chore: add unit test
2022-10-19 11:02:58 +08:00
Yingwen
4d08ee6fbb fix: Fix broken wal and memtable benchmarks (#320) 2022-10-19 10:54:01 +08:00
dennis zhuang
94b263c261 refactor: datanode instance (#316)
* refactor: datanode Instance

* fix: resolve todo
2022-10-19 10:51:45 +08:00
Yingwen
c6d91edb83 refactor(storage): Split schema mod into multiple sub-mods (#318) 2022-10-18 18:56:52 +08:00
Yingwen
cdf3280fcf feat: Region supports write requests with old schema (#297)
* feat: Adds ColumnDefaultConstraint::create_default_vector

ColumnDefaultConstraint::create_default_vector is ported from
MitoTable::try_get_column_default_constraint_vector.

* refactor: Replace try_get_column_default_constraint_vector by create_default_vector

* style: Remove unnecessary map_err in MitoTable::insert

* feat: Adds compat_write

For each column in `dest_schema` but not in `write_batch`, this method inserts a vector with the default value into the `write_batch`. If there are columns not in `dest_schema`, an error is returned.
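
A toy version of that padding behaviour, using plain maps as stand-ins for the schema and the write batch (not the real API):

```rust
use std::collections::BTreeMap;

// Plain maps stand in for the destination schema and the write batch.
fn compat_write(
    dest_schema: &BTreeMap<String, i64>,    // column name -> default value
    batch: &mut BTreeMap<String, Vec<i64>>, // column name -> column values
    num_rows: usize,
) -> Result<(), String> {
    // A column the destination schema doesn't know about is an error.
    if let Some(unknown) = batch.keys().find(|c| !dest_schema.contains_key(*c)) {
        return Err(format!("column {unknown} is not in dest schema"));
    }
    // Pad every missing column with its default value.
    for (name, default) in dest_schema {
        batch
            .entry(name.clone())
            .or_insert_with(|| vec![*default; num_rows]);
    }
    Ok(())
}

fn main() {
    let dest: BTreeMap<String, i64> =
        BTreeMap::from([("host".to_string(), 0), ("value".to_string(), -1)]);
    let mut batch: BTreeMap<String, Vec<i64>> =
        BTreeMap::from([("host".to_string(), vec![1, 2])]);
    compat_write(&dest, &mut batch, 2).unwrap();
    assert_eq!(batch["value"], vec![-1, -1]); // missing column padded with defaults
}
```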

* chore: Add info log to RegionInner::alter

* feat(storage): RegionImpl::write support request with old version

* feat: Add nullable check when creating default value

* feat: Validate nullable and default value

* chore: Modify PutOperation comments

* chore: Make ColumnDescriptor::is_nullable readonly and validate name

* feat: Use CompatWrite trait to replace campat::compat_write method

Adds a CompatWrite trait to support padding columns to WriteBatch:
- The WriteBatch and PutData implement this trait
- Fix the issue that WriteBatch::schema is not updated to the schema after compat
- Also validate the created column when adding to PutData

The WriteBatch would also pad default values for missing columns in PutData, so the memtable inserter doesn't need to manually check whether the column is nullable and then insert a NullVector. Every WriteBatch is ensured to have all columns defined by the schema in its PutData.

* feat: Validate constraint by ColumnDefaultConstraint::validate()

The ColumnDefaultConstraint::validate() would also ensure the default
value has the same data type as the column's.

* feat: Use NullVector for null columns

* fix: Fix BinaryType returns wrong logical_type_id

* fix: Fix tests and revert NullVector for null columns

NullVector doesn't support a custom logical type, which makes it hard to encode/decode and also causes the arrow/protobuf codec of the write batch to fail.

* fix: create_default_vector uses replicate to create a vector with the default value

This fixes the test_codec_with_none_column_protobuf test, as we need to downcast the vector to construct the protobuf values.

* test: add tests for column default constraints

* test: Add tests for CompatWrite trait impl

* test: Test write region with old schema

* fix(storage): Fix replay() applies metadata too early

The committed sequence of the RegionChange action is the sequence of the last entry that uses the old metadata (schema). During replay, we should apply the new metadata only after we see an entry whose sequence is greater than (not equal to) the `RegionChange::committed_sequence`.

Also remove duplicate `set_committed_sequence()` call in
persist_manifest_version()
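
A tiny illustration of the replay rule above; names are hypothetical:

```rust
// Toy check for the rule above: the entry whose sequence equals the committed
// sequence still uses the old schema; only strictly newer entries see the new one.
fn should_apply_new_metadata(entry_sequence: u64, change_committed_sequence: u64) -> bool {
    entry_sequence > change_committed_sequence
}

fn main() {
    assert!(!should_apply_new_metadata(42, 42)); // equal: keep the old metadata
    assert!(should_apply_new_metadata(43, 42));  // strictly greater: apply the new one
}
```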

* chore: Removes some unreachable codes

Also add more comments to document codes in these files

* refactor: Refactor MitoTable::insert

Return error if we could not create a default vector for given column,
instead of ignoring the error

* chore: Fix incorrect comments

* chore: Fix typo in error message
2022-10-18 10:47:24 +08:00
Ning Sun
f243649971 refactor: Removed openssl from build requirement (#308)
* refactor: replace another axum-test-helper branch

* refactor: upgrade opendal version

* refactor: use cursor for file buffer

* refactor: remove native-tls in mysql_async

* refactor: use async block and pipeline for newer opendal api

* chore: update Cargo.lock

* chore: update dependencies

* docs: removed openssl from build requirement

* fix: call close on pipe writer to flush reader for parquet streamer

* refactor: remove redundant return

* chore: use pinned revision for our forked mysql_async

* style: avoid wild-card import in test code

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

* style: use chained call for builder

Co-authored-by: liangxingjian <965662709@qq.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2022-10-17 19:29:17 +08:00
evenyag
69ba4581b7 test(servers): Fix OpenTSDB shutdown test occasionally fails (#311)
* test(servers): OpenTSDB shutdown test cover error branch

Create connection continuously to cover some branches of error handling
in OpentsdbServer

* test(servers): Add more tests for opentsdb server

Add a test to ensure we cannot connect to the server after shutdown and a test to check existing connection usage after shutdown.
2022-10-17 14:00:44 +08:00
evenyag
f942b53ed0 style(table-engine): Remove unnecessary TableError::from (#312)
The usage of TableError::from could be replaced by `?`, which is more
concise
2022-10-17 11:49:21 +08:00
dennis zhuang
25a16875b6 feat: create table and add new columns automatically in gRPC (#310)
* fix: readme

* feat: change Column's datatype in protobuf from optional to required

* feat: supports creating table and adding new columns automatically in gRPC, #279, #283

* fix: test

* refactor: execute_grpc_insert

* refactor: clean code and add test

* fix: test after rebasing develop branch

* test: test grpc server with different ports

* fix: typo

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* fix: typo

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* chore: minor changes

* chore: build_alter_table_request

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2022-10-17 10:34:52 +08:00
dennis zhuang
494a93c4f2 feat: manifest improvements (#303)
* feat: adds committed_sequence to RegionChange action, #281

* refactor: saving protocol action when writer version is changed

* feat: recover all region metadata in manifest and replay them when replaying WAL, #282

* refactor: minor change and test recovering metadata after altering table schema

* fix: write wrong min_reader_version into manifest for region

* refactor: move up DataRow

* refactor: by CR comments

* test: assert recovered metadata

* refactor: by CR comments

* fix: comment
2022-10-13 15:43:35 +08:00
Ruihang Xia
b61d5989b7 fix: flaky parquet predicate suits (#307)
* fix: flaky parquet predicate suits

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: change ParquetWriter::write_rows as well

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-10-13 14:00:42 +08:00
evenyag
a8a6426abf fix: Fix replicate_primitive doesn't consider null values (#306) 2022-10-12 16:52:09 +08:00
Ruihang Xia
e99668092c refactor: relax memory ordering of accessing VersionControl::submmitted_sequence (#305)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-10-12 11:52:43 +08:00
Ruihang Xia
0c829a9712 chore: ignore vscode config directory in git (#299)
* chore: ignore vscode config directory in git

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: correct gitignore file

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-10-10 15:08:26 +08:00
fys
752be8dc41 feat: batch grpc insert for influxdb write (#295) 2022-10-09 10:49:27 +08:00
evenyag
2e1ab050a7 feat: Implements RegionWriter::alter (#292)
* fix(storage): Failure of writing manifest version won't abort applying edit

* feat(storage): Adds RegionMetadata::validate_alter to validate AlterRequest

* fix(storage): Protect write and apply region edit by version mutex

The region meta action needs the previous manifest version, so we need to use the version mutex to prevent other threads from updating the manifest version while writing the action to the manifest.

* feat(storage): Implement RegionWriter::alter

RegionWriter::alter() would (see the sketch below):
1. acquire the write lock first
2. then validate the alter request
3. build the new metadata by RegionMetadata::alter()
4. acquire the version lock
5. write the metadata to the manifest, which also bumps the manifest version
6. freeze mutable memtables and apply the new metadata to Version
7. write the manifest version to the WAL
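
A minimal, synchronous sketch of those seven steps with toy types and std mutexes; the real implementation is async and works on an actual manifest, memtables and WAL:

```rust
use std::sync::Mutex;

// All types here are toy stand-ins; the real code is async and persists to a
// real manifest and write-ahead log.
#[derive(Clone)]
struct Metadata {
    columns: Vec<String>,
}

struct Region {
    write_lock: Mutex<()>,           // serializes writers
    version: Mutex<(u64, Metadata)>, // (manifest_version, current metadata)
    wal: Mutex<Vec<u64>>,            // stand-in for the WAL
}

impl Region {
    fn alter(&self, new_column: &str) -> Result<(), String> {
        let _writing = self.write_lock.lock().unwrap();      // 1. acquire the write lock
        let current = self.version.lock().unwrap().1.clone();
        if current.columns.iter().any(|c| c.as_str() == new_column) {
            return Err("column already exists".to_string()); // 2. validate the request
        }
        let mut new_meta = current;                           // 3. build the new metadata
        new_meta.columns.push(new_column.to_string());
        let mut version = self.version.lock().unwrap();       // 4. acquire the version lock
        version.0 += 1;                                        // 5. "write" the manifest, bumping its version
        version.1 = new_meta;                                  // 6. apply the new metadata
        self.wal.lock().unwrap().push(version.0);              // 7. persist the manifest version to the WAL
        Ok(())
    }
}

fn main() {
    let region = Region {
        write_lock: Mutex::new(()),
        version: Mutex::new((0, Metadata { columns: vec!["ts".to_string()] })),
        wal: Mutex::new(Vec::new()),
    };
    region.alter("cpu").unwrap();
    assert_eq!(region.version.lock().unwrap().0, 1);
    assert_eq!(region.wal.lock().unwrap().len(), 1);
}
```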

* test(storage): Add tests for Region::alter()

* test(storage): Add tests for RegionMetadata::validate_alter

* chore(storage): Modify InvalidAlterRequest error msg

* chore: Adjust comment
2022-10-08 20:41:04 +08:00
Ning Sun
178f8b64b5 fix: Update pgwire and fix buffer overflow issue (#293) 2022-09-29 17:58:03 +08:00
fys
fe8327fc78 feat: support write data via influxdb line protocol in frontend (#280)
* feat: support influxdb line protocol write
2022-09-29 17:08:08 +08:00
evenyag
ed89cc3e21 feat: Change signature of the Region::alter method (#287)
* feat: Change signature of the Region::alter method

* refactor: Add builders for ColumnsMetadata and ColumnFamiliesMetadata

* feat: Support altering the region metadata

Altering the region metadata is done in a copy-on-write fashion (see the sketch below):
1. Convert the `RegionMetadata` into a `RegionDescriptor`, which is more convenient to mutate
2. Apply the `AlterOperation` to the `RegionDescriptor`. This mutates the descriptor in place
3. Create a `RegionMetadataBuilder` from the descriptor, bump the version and then build the new metadata
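
A toy rendering of that copy-on-write flow with simplified types (the real descriptor and builder validate much more):

```rust
// Simplified stand-ins for the real types; validation and builders are omitted.
struct RegionMetadata {
    version: u32,
    columns: Vec<String>,
}

struct RegionDescriptor {
    columns: Vec<String>,
}

enum AlterOperation {
    AddColumn(String),
    DropColumn(String),
}

impl RegionMetadata {
    fn alter(&self, op: &AlterOperation) -> RegionMetadata {
        // 1. convert the metadata into the mutable descriptor form
        let mut desc = RegionDescriptor { columns: self.columns.clone() };
        // 2. apply the operation to the descriptor in place
        match op {
            AlterOperation::AddColumn(c) => desc.columns.push(c.clone()),
            AlterOperation::DropColumn(c) => desc.columns.retain(|x| x != c),
        }
        // 3. rebuild the metadata from the descriptor with a bumped version
        RegionMetadata { version: self.version + 1, columns: desc.columns }
    }
}

fn main() {
    let meta = RegionMetadata { version: 0, columns: vec!["ts".to_string(), "host".to_string()] };
    let added = meta.alter(&AlterOperation::AddColumn("cpu".to_string()));
    assert_eq!((added.version, added.columns.len()), (1, 3));
    let dropped = added.alter(&AlterOperation::DropColumn("host".to_string()));
    assert_eq!(dropped.columns, vec!["ts".to_string(), "cpu".to_string()]);
}
```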

* feat: Implement altering table using the new Region::alter api

* refactor: Replaced wal name by region id

Region id is cheaper to clone than name

* chore: Remove pub(crate) of build_xxxx in engine mod

* style: fix clippy

* test: Add tests for AlterOperation and RegionMetadata::alter

* chore: ColumnsMetadataBuilder methods return &mut Self
2022-09-28 13:56:25 +08:00
Lei, Huang
25078e821b feat: type rewrite optimizer (#272)
* feat: add type conversion optimizer

* feat: add expr rewrite logical plan optimizer

* chore: add some doc

* fix: unit test

* fix: time zone issue in unit tests

* chore: add more tests

* fix: some CR comments

* chore: rebase develop

* chore: fix unit tests

* fix: unit test use timestamp with time zone

* chore: add more tests
2022-09-28 13:56:13 +08:00
LFC
ca732d45f9 feat: opentsdb support (#274)
* feat: opentsdb support

* fix: tests

* fix: resolve CR comments

* fix: resolve CR comments

* fix: resolve CR comments

* fix: resolve CR comments

* refactor: remove feature flags for opentsdb and pg

* fix: resolve CR comments

* fix: resolve CR comments

Co-authored-by: luofucong <luofucong@greptime.com>
2022-09-26 15:47:43 +08:00
dennis zhuang
0fa68ab7a5 feat: show databases and show tables (#276)
* feat: ensure time index column can't be included in primary key

* feat: sql parser supports show tables statement

* feat: impl show databases and show tables, #183

* feat: impl like expression for show databases/tables and add tests

* fix: typo

* fix: address CR problems
2022-09-26 14:05:49 +08:00
dennis zhuang
5f322ba16e feat: impl default constraint for column (#273)
* feat: impl default value for column in schema

* test: add test for column's default value

* refactor: rename ColumnDefaultValue to ColumnDefaultConstraint

* fix: timestamp column may be a constant vector

* fix: test_shutdown_pg_server

* fix: typo

Co-authored-by: LFC <bayinamine@gmail.com>

* fix: typo

Co-authored-by: LFC <bayinamine@gmail.com>

* fix: typo

Co-authored-by: LFC <bayinamine@gmail.com>

* chore: use table_info directly

Co-authored-by: LFC <bayinamine@gmail.com>

* refactor: by CR comments

Co-authored-by: LFC <bayinamine@gmail.com>
2022-09-22 10:43:21 +08:00
evenyag
a954ba862a feat: Implement dedup reader (#270)
* feat: Handle empty NullVector in replicate_null

* chore: Rename ChunkReaderImpl::sst_reader to batch_reader

* feat: dedup reader wip

* feat: Add BatchOp

Add BatchOp to support dedup/filter Batch and implement BatchOp for
ProjectedSchema.

Moves compare_row_of_batch to BatchOp::compare_row.

* feat: Allow Batch has empty columns

* feat: Implement DedupReader

Also add From<MutableBitmap> for BooleanVector

* test: Test dedup reader

Fix issue that compare_row compared by the full key, not the row key

* chore: Add comments to BatchOp

* feat: Dedup results from merge reader

* test: Test merge read after flush

* test: Test merge read after flush and reopen

* test: Test replicate empty NullVector

* test: Add tests for `ProjectedSchema::dedup/filter`

* feat: Filter empty batches in DedupReader

Also fix clippy warnings and refactor some code
2022-09-21 17:49:53 +08:00
evenyag
9489862417 fix: Fix sequence decrease after flush then reopen (#271)
The log store uses the start sequence instead of the file start id to filter the log stream. Add more tests about flush, including flushing an empty memtable and reopening after flush.
2022-09-21 14:23:59 +08:00
Lei, Huang
35ba0868b5 feat: impl filter push down to parquet reader (#262)
* wip add predicate definition

* fix value move

* implement predicate and prune

* impl filter push down in chunk reader

* add more expr tests

* chore: rebase develop

* fix: unit test

* fix: field name/index lookup when building pruning stats

* chore: add some meaningless test

* fix: remove unnecessary extern crate

* fix: use datatypes::schema::SchemaRef
2022-09-21 11:47:55 +08:00
Ning Sun
8a400669aa feat: postgre wire protocol for frontend (#269) 2022-09-19 15:39:53 +08:00
evenyag
e697ba975b feat: Implement dedup and filter for vectors (#245)
* feat: Dedup vector

* refactor: Re-export Date/DateTime/Timestamp

* refactor: Named field for ListValueRef::Ref

Use a named field `val` instead of a tuple for variant ListValueRef::Ref to keep consistency with ListValueRef::Indexed

* feat: Implement ScalarVector for ListVector

Also implements ScalarVectorBuilder for ListVectorBuilder, Scalar for
ListValue and ScalarRef for ListValueRef

* test: Add tests for ScalarVector implementation of ListVector

* feat: Implement dedup using match_scalar_vector

* refactor: Move dedup func to individual mod

* chore: Update ListValueRef comments

* refactor: Move replicate to VectorOp

Move compute operations to the VectorOp trait, which acts as a super trait of Vector. So we could later put dedup/filter methods in the VectorOp trait and avoid defining too many methods in the Vector trait.
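
A minimal sketch of that trait split, with simplified names and a single toy kernel (the real VectorOp works on Arrow-backed vectors):

```rust
// Illustrative only: compute kernels live in VectorOp, which Vector then
// requires as a super trait, so the core Vector trait stays small.
trait VectorOp {
    /// Marks rows to keep, assuming the vector is sorted (as the real dedup does).
    fn dedup_mask(&self) -> Vec<bool>;
}

trait Vector: VectorOp {
    fn len(&self) -> usize;
}

struct Int64Vector(Vec<i64>);

impl VectorOp for Int64Vector {
    fn dedup_mask(&self) -> Vec<bool> {
        self.0
            .iter()
            .enumerate()
            .map(|(i, v)| i == 0 || self.0[i - 1] != *v)
            .collect()
    }
}

impl Vector for Int64Vector {
    fn len(&self) -> usize {
        self.0.len()
    }
}

fn main() {
    let v = Int64Vector(vec![1, 1, 2, 3, 3]);
    assert_eq!(v.dedup_mask(), vec![true, false, true, true, false]);
    assert_eq!(v.len(), 5);
}
```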

* refactor: Move scalar bounds to PrimitiveElement

Move the Scalar and ScalarRef trait bounds to PrimitiveElement, so that for each native type that implements PrimitiveElement, its PrimitiveVector always implements ScalarVector, and we can use it as a ScalarVector without adding additional trait bounds.

* refactor: Move dedup to VectorOp

Remove compute mod and move dedup logic to operations::dedup

* feat: Implement VectorOp::filter

* test: Move replicate test of primitive to replicate.rs

* test: Add more replicate tests

* test: Add tests for dedup and filter

Also fix NullVector::dedup and ConstantVector::dedup

* style: fix clippy

* chore: Remove unused scalar.rs

* test: Add more tests for VectorOp and fix failed tests

Also fix TimestampVector eq not implemented.

* chore: Address CR comments

* chore: mention vector should be sorted in comment

* refactor: slice the vector directly in replicate_primitive_with_type
2022-09-19 14:05:02 +08:00
LFC
a649f34832 fix: select empty table (#268)
* fix: select empty table

Co-authored-by: luofucong <luofucong@greptime.com>
2022-09-19 11:28:12 +08:00
Lei, Huang
1770079691 fix: slice implementation for DateVector/DateTimeVector… (#266)
* fix: replicate and slice implementation for DateVector/DateTimeVector/TimestampVector

* chore: rebase develop
2022-09-16 16:38:46 +08:00
Ning Sun
1639b6e7ce refactor: rename to_vec to take for RecordBatches (#264) 2022-09-16 14:04:04 +08:00
Ning Sun
e67b0eb259 feat: Initial support of postgresql wire protocol (#229)
* feat: initial commit of postgres protocol adapter

* initial commit of postgres server

* feat: use common_io runtime and correct testcase

* fix previous tests

* feat: adopt pgwire api changes and add support for text encoded data

* feat: initial integration with datanode

* test: add feature flag to test

* fix: resolve lint warnings

* feat: add postgres feature flags for datanode

* feat: add support for newly introduced timestamp type

* feat: adopt latest datanode changes

* fix: address clippy warning for flatten scenario

* fix: make clippy great again

* fix: address issues found in review

* chore: sort dependencies by name

* feat: adopt new Output api

* fix: return error on unsupported data types

* refactor: extract common code dealing with record batches

* fix: resolve clippy warnings

* test: adds some unit tests postgres handler

* test: correct test for cargo update

* fix: update query module name

* test: add assertion for error content
2022-09-15 21:39:05 +08:00
LFC
fb6153f7e0 feat: a new type for supplying Ord to Primitive (#255)
Co-authored-by: luofucong <luofucong@greptime.com>
2022-09-15 18:32:55 +08:00
dennis zhuang
dfa3012396 feat: improve python coprocessor parser (#260)
* feat: supports DateTime, Date and Timestamp column type to be returned by py scripts

* feat: improve coprocessor compiler, make it work better

* fix: comments

* fix: typo

* Update src/script/src/python/vector.rs

Co-authored-by: LFC <bayinamine@gmail.com>

* Update src/script/src/python/coprocessor.rs

Co-authored-by: LFC <bayinamine@gmail.com>

Co-authored-by: LFC <bayinamine@gmail.com>
2022-09-15 16:18:33 +08:00
dennis zhuang
c8cb705d9e ci: pre-commit configuration and hooks (#261)
* feat: adds pre-commit config and hooks

* refactor: sort all Cargo.toml by cargo-sort

* ci: adds conventional-pre-commit hook to pre-commit

* fix: remove .pre-commit-hooks.yaml

* fix: readme

* Update .pre-commit-config.yaml

Co-authored-by: Lei, Huang <6406592+v0y4g3r@users.noreply.github.com>

* ci: move clippy hook to push stage

* docs: install pre-push github hook

Co-authored-by: Lei, Huang <6406592+v0y4g3r@users.noreply.github.com>
2022-09-15 11:30:08 +08:00
fys
8400f8dfd4 chore: move query::Output to common-query module (#259)
* chore: move query::Output to common-query module

* chore: remove “query” dependency in client module
2022-09-15 10:07:58 +08:00
fys
ef40b12749 chore: add optional for datatype, rename data_type to datatype (#258) 2022-09-14 18:07:22 +08:00
evenyag
0dce8946d4 ci: Add ci cache (#256) 2022-09-14 16:06:59 +08:00
LFC
7dee7199dc fix: set unittests dir to /tmp can explode grcov's disk (#253)
* fix: set unittests dir to /tmp can explode grcov's disk

Co-authored-by: luofucong <luofucong@greptime.com>
2022-09-14 15:10:10 +08:00
Lei, Huang
2dbaad9770 fix: forbid use int64 as timestamp column data type (#248)
* fix: forbid use int64 as timestamp column data type

* fix unit test

* fix unit tests

* change gmt_created and gmt_modified data type in system tables to timestamp

* also change data type in readme
2022-09-14 12:03:16 +08:00
discord9
20dcaa6897 feat: interval & None value for `prev` & `next` (#252)
* test: for builtin functions

* test: expect fail for `datetime()`

* feat: add `interval()` fn(WIP)

* feat: `interval()` fn in builtin(UNTEST)

* refactor: move `py_vec_obj_to_array` to util.rs

* style: fmt

* test: simple `interval()` cases

* test: `interval()` with `last()`&`first()`

* doc: `ts` param of `interval()`

* log: common_telemetry for logging in script crate

* doc: corresponding test fn for each .ron file

* feat: change to `mpsc` for schedule_job

* test: schedule_job

* dep: rm rustpython dep in common-function

* refactor: mv `schedule_job` into `Script` trait

* test: change to use `interval` to sample datapoint

* feat: add gen_none_array for generate None Array

* feat: impl Missing value for `prev`&`next`

* test: `sum(prev(values))`

* doc: add comment for why not support Float16 in `prev()`

* feat: add `interval` in py side mock module

* style: cargo fmt

* refactor: according to comments

* refactor: extract `apply_interval_function`

* style: cargo fmt

* refactor: remove `schedule()`

* style: cargo fmt
2022-09-14 10:48:27 +08:00
LFC
ec99eb0cd0 feat: frontend instance (#238)
* feat: frontend instance

* no need to carry column length in `Column` proto

* add more tests

* rebase develop

* create a new variant with already provisioned RecordBatches in Output

* resolve code review comments

* new frontend instance does not connect datanode grpc

* add more tests

* add more tests

* rebase develop

Co-authored-by: luofucong <luofucong@greptime.com>
2022-09-13 17:10:22 +08:00
LFC
bdd5bdd917 Set unittest's logging dir to "/tmp" to not pollute the source code dir when running unittests from an IDE. (#249)
Co-authored-by: luofucong <luofucong@greptime.com>
2022-09-13 16:14:32 +08:00
dennis zhuang
03169c4a04 feat: impl scripts table and /run-script restful api (#230)
* feat: impl scripts table and /execute restful api

* fix: test failures

* fix: test failures

* feat: impl /run_script API

* refactor: rename run_script api to run-script and test script manager

* fix: remove println

* refactor: error mod

* refactor: by CR comments

* feat: rebase develop and change timestamp/gmt_created/gmt_modified type to timestamp

* refactor: use assert_eq instead of assert

* doc: fix comment in Script#execute function
2022-09-13 15:09:00 +08:00
LFC
cad35fe82e ci: make grcov happy (#246)
* make grcov happy

Co-authored-by: luofucong <luofucong@greptime.com>
2022-09-13 14:19:49 +08:00
LFC
64b6b2afe1 feat: procedure macro to help writing UDAF (#242)
* feat: procedure macro to help writing UDAF

* resolve code review comments

Co-authored-by: luofucong <luofucong@greptime.com>
2022-09-13 10:39:44 +08:00
Morranto
628cdb89e8 feat: Add grpc implementation for alter table operations (#239)
* feat: grpc-alter impl

* fix: format

* fix cr

* Update src/datanode/src/error.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

* Update src/datanode/src/server/grpc/ddl.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

* fix bug

* Update src/datanode/src/server/grpc/ddl.rs

Co-authored-by: Ning Sun <sunng@protonmail.com>

* fix: format

* fix bug

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>
Co-authored-by: Ning Sun <sunng@protonmail.com>
2022-09-10 21:50:21 +08:00
evenyag
d52d1eb122 fix: Only convert LogicalTypeId to ConcreteDataType in tests (#241)
Converting LogicalTypeId to ConcreteDataType is only allowed in tests, since some additional info is not stored in LogicalTypeId now. It is just an id, or kind, and does not contain the full type info.
2022-09-09 17:48:59 +08:00
evenyag
0290cdb5d6 test: Fix merge tests (#243)
* test: Fix merge tests

The merge tests still use Int64Vector for timestamp, which should
use TimestampVector instead.

* test: Test Debug format for Source::Reader

Mainly to improve code coverage
2022-09-09 16:10:57 +08:00
Lei, Huang
9366e77407 feat: impl timestamp type, value and vectors (#226)
* wip: impl timestamp data type

* add timestamp vectors

* adapt to recent changes to vector module

* fix all unit test

* rebase develop

* fix slice

* change default time unit to millisecond

* add more tests

* fix some CR comments

* fix some CR comments

* fix clippy

* fix some cr comments

* fix some CR comments

* fix some CR comments

* remove time unit in LogicalTypeId::Timestamp
2022-09-09 11:43:30 +08:00
evenyag
82dfe78321 feat: Implement merge reader (#225)
* feat: Check columns when constructing Batch

* feat: Merge reader skeleton

* test: Add tests for MergeReader

* feat: Use get_ref to compare row

* feat: Implement MergeReader

* test: Add more tests

* feat: Use MergeReader to implement ChunkReader

Now the ChunkReaderImpl uses MergeReader as the default reader. Also add more tests to MergeReader.

* docs: Describe the merge algo in merge.rs

Ports the doc comments from kudu to merge.rs to describe the idea of the
merge algorithm we used.

* test: Fix unit tests

* chore: Address CR comments

Panics if number of columns in batch is not equal to `BatchBuilder`'s

* chore: Address CR comments

* chore: Implement Debug and add test for Node
2022-09-09 11:35:51 +08:00
fys
37a425658c chore: optimize status code (#235) 2022-09-08 15:34:44 +08:00
Morranto
cc0c883ee2 feat: add proto files for grpc-alter (#234)
* feat: add proto files for grpc-alter

* fix: format

Co-authored-by: liangxingjian <xingjianliang@proton.me>
2022-09-08 15:33:59 +08:00
evenyag
0ae99f7ac3 fix: Fix MitoTable::scan only returns one batch (#227)
* feat: Add tests for different batch

Add region scan test with different batch size

* fix: Fix table scan only returns one batch

* style: Fix clippy

* test: Add tests to scan table with rows more than batch size

* fix: Fix MockChunkReader never stop
2022-09-06 20:36:05 +08:00
evenyag
7f8195861e feat: Adds push_value_ref and extend_slice_of to MutableVector (#215)
* feat: Impl cmp_element() for Vector

* chore: Add doc comments to MutableVector

* feat: Add create_mutable() to DataType

Add `create_mutable()` to create a MutableVector for each DataType.
Implement ListVectorBuilder and NullVectorBuilder for ListType and
NullType.

* feat: Add ValueRef

ValueRef is a reference to a value that can be used to avoid some allocation when getting data from a Vector. To support ValueRef, also implement ListValueRef for ListValue, but comparison of ListValueRef still requires some allocation, due to the complexity of ListValue and ListVector.

Impl some From traits for ValueRef
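
A toy rendering of the ValueRef idea with just two value kinds; the real type covers every supported data type:

```rust
// Illustrative only: a borrowed view over a cell so reads don't allocate an owned value.
#[derive(Debug, PartialEq)]
enum ValueRef<'a> {
    Null,
    String(&'a str),
}

struct StringVector(Vec<Option<String>>);

impl StringVector {
    fn get_ref(&self, i: usize) -> ValueRef<'_> {
        match &self.0[i] {
            Some(s) => ValueRef::String(s),
            None => ValueRef::Null,
        }
    }
}

fn main() {
    let v = StringVector(vec![Some("a".to_string()), None]);
    assert_eq!(v.get_ref(0), ValueRef::String("a"));
    assert_eq!(v.get_ref(1), ValueRef::Null);
}
```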

* feat: Implement get_ref for Vector

* feat: Remove cmp_element from Vector

`cmp_element` could be replaced by `get_ref` and then compare

* feat: Implement push/extend for PrimitiveVectorBuilder

Implement push_value_ref() and extend_slice_of() for
PrimitiveVectorBuilder.

Also refactor the DataTypeBuilder trait for primitive types into the PrimitiveElement trait, adding the necessary cast helper methods to it.
- Cast a reference to Vector to reference arrow's primitive array
- Cast a ValueRef to primitive type
- Also make PrimitiveElement super trait of Primitive

* feat: Implement push/extend for all vector builders

Implement push_value_ref() and extend_slice_of() for remaining vector
builders. Add some helpful cast method to ValueRef and a method to
cast Value to ValueRef.

Change the behavior of PrimitiveElement::cast_xxx to panic when unable
to cast, since push_value_ref() and extend_slice_of() always panic
when given invalid input data type.

* feat: MutableVector returns error if data type unmatch

* test: Add tests for ValueRef

* feat: Add tests for Vector::get_ref

* feat: NullVector returns error if data type unmatch

* test: Add tests for vector builders

* fix: Fix compile error in python coprocessor

* refactor: Add lifetime param to IntoValueRef

The Primitive trait just uses the `IntoValueRef<'static>` bound. Also rename create_mutable to create_mutable_vector.

* chore: Address CR comments

* feat: Customize PartialOrd/Ord for Value/ValueRef

Panics if values/refs have different data type

* style: Fix clippy

* refactor: Use macro to generate body of ValueRef::as_xxx
2022-09-06 13:44:48 +08:00
LFC
5e67301c00 feat: implement alter table (#218)
* feat: implement alter table

* Currently we have no plans to support altering the primary keys (maybe never), so the related code was removed.

* make `alter` a trait function in table

* address other CR comments

* cleanup

* rebase develop

* resolve code review comments

Co-authored-by: luofucong <luofucong@greptime.com>
2022-09-06 13:44:34 +08:00
LFC
119ff2fc2e feat: create table through GRPC interface (#224)
* feat: create table through GRPC interface

* move `CreateExpr` into the `oneof` expr of `AdminExpr` in `admin.proto`, and implement the admin GRPC interface

* add `table_options` and `partition_options` to `CreateExpr`

* resolve code review comments

Co-authored-by: luofucong <luofucong@greptime.com>
2022-09-06 12:51:07 +08:00
Lei, Huang
3f9144a2e3 fix: StringVector use Utf8Array (#222) 2022-09-02 11:25:33 +08:00
448 changed files with 41096 additions and 6812 deletions

2
.cargo/config.toml Normal file

@@ -0,0 +1,2 @@
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"


@@ -4,7 +4,7 @@
"color": "B60205"
},
"CHECKS": {
"regexp": "^(feat|fix|test|refactor|chore|style|doc|perf|build|ci|revert)(\\(.*\\))?:.*",
"regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?:.*",
"ignoreLabels" : ["ignore-title"]
}
}


@@ -15,6 +15,7 @@ jobs:
grcov:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v2
- uses: arduino/setup-protoc@v1
@@ -24,6 +25,12 @@ jobs:
toolchain: ${{ env.RUST_TOOLCHAIN }}
override: true
profile: minimal
- name: Rust Cache
uses: Swatinem/rust-cache@v2.0.0
- name: Cleanup disk
uses: curoky/cleanup-disk-action@v2.0
with:
retain: 'rust'
- name: Execute tests
uses: actions-rs/cargo@v1
with:
@@ -32,10 +39,11 @@ jobs:
env:
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
RUSTFLAGS: "-Zprofile -Ccodegen-units=1 -Cinline-threshold=0 -Clink-dead-code -Coverflow-checks=off -Cpanic=abort -Zpanic_abort_tests"
RUSTFLAGS: "-Zprofile -Ccodegen-units=1 -Cinline-threshold=0 -Clink-dead-code -Coverflow-checks=off -Cpanic=unwind -Zpanic_abort_tests"
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
UNITTEST_LOG_DIR: "__unittest_logs"
- name: Gather coverage data
id: coverage
uses: actions-rs/grcov@v0.1


@@ -1,6 +1,18 @@
on:
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
push:
branches:
- develop
- main
paths-ignore:
- 'docs/**'
- 'config/**'
- '.github/**'
- '**.md'
- '**.yml'
- '.dockerignore'
- 'docker/**'
name: Continuous integration for developing
@@ -12,6 +24,7 @@ jobs:
name: Check
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v2
- uses: arduino/setup-protoc@v1
@@ -20,6 +33,8 @@ jobs:
profile: minimal
toolchain: ${{ env.RUST_TOOLCHAIN }}
override: true
- name: Rust Cache
uses: Swatinem/rust-cache@v2.0.0
- uses: actions-rs/cargo@v1
with:
command: check
@@ -29,6 +44,7 @@ jobs:
name: Test Suite
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v2
- uses: arduino/setup-protoc@v1
@@ -37,6 +53,8 @@ jobs:
profile: minimal
toolchain: ${{ env.RUST_TOOLCHAIN }}
override: true
- name: Rust Cache
uses: Swatinem/rust-cache@v2.0.0
- uses: actions-rs/cargo@v1
with:
command: test
@@ -46,11 +64,13 @@ jobs:
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
UNITTEST_LOG_DIR: "__unittest_logs"
fmt:
name: Rustfmt
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v2
- uses: arduino/setup-protoc@v1
@@ -59,6 +79,8 @@ jobs:
profile: minimal
toolchain: ${{ env.RUST_TOOLCHAIN }}
override: true
- name: Rust Cache
uses: Swatinem/rust-cache@v2.0.0
- run: rustup component add rustfmt
- uses: actions-rs/cargo@v1
with:
@@ -69,6 +91,7 @@ jobs:
name: Clippy
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v2
- uses: arduino/setup-protoc@v1
@@ -77,6 +100,8 @@ jobs:
profile: minimal
toolchain: ${{ env.RUST_TOOLCHAIN }}
override: true
- name: Rust Cache
uses: Swatinem/rust-cache@v2.0.0
- run: rustup component add clippy
- uses: actions-rs/cargo@v1
with:


@@ -11,6 +11,7 @@ on:
jobs:
check:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.3.4
with:

179
.github/workflows/release.yml vendored Normal file

@@ -0,0 +1,179 @@
on:
push:
tags:
- "v*.*.*"
name: Release
env:
RUST_TOOLCHAIN: nightly-2022-07-14
jobs:
build:
name: Build binary
strategy:
matrix:
# The file format is greptime-<tag>.<os>-<arch>
include:
- arch: x86_64-unknown-linux-gnu
os: ubuntu-latest
file: greptime-${{ github.ref_name }}.linux-amd64
- arch: aarch64-unknown-linux-gnu
os: ubuntu-latest
file: greptime-${{ github.ref_name }}.linux-arm64
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-${{ github.ref_name }}.darwin-arm64
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-${{ github.ref_name }}.darwin-amd64
runs-on: ${{ matrix.os }}
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Cache cargo assets
uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ matrix.arch }}-build-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Install Protoc for linux
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
run: | # Make sure the protoc is >= 3.15
wget https://github.com/protocolbuffers/protobuf/releases/download/v21.9/protoc-21.9-linux-x86_64.zip
unzip protoc-21.9-linux-x86_64.zip -d protoc
sudo cp protoc/bin/protoc /usr/local/bin/
sudo cp -r protoc/include/google /usr/local/include/
- name: Install Protoc for macos
if: contains(matrix.arch, 'darwin')
run: |
brew install protobuf
- name: Install dependencies for linux
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
run: |
sudo apt-get -y update
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu
- name: Install stable toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: ${{ env.RUST_TOOLCHAIN }}
override: true
target: ${{ matrix.arch }}
- name: Output package versions
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
- name: Run cargo build
uses: actions-rs/cargo@v1
with:
command: build
args: ${{ matrix.opts }} --release --locked --target ${{ matrix.arch }}
- name: Calculate checksum and rename binary
shell: bash
run: |
cd target/${{ matrix.arch }}/release
cp greptime ${{ matrix.file }}
echo $(shasum -a 256 greptime | cut -f1 -d' ') > ${{ matrix.file }}.sha256sum
- name: Upload artifacts
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.file }}
path: target/${{ matrix.arch }}/release/${{ matrix.file }}
- name: Upload checksum of artifacts
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.file }}.sha256sum
path: target/${{ matrix.arch }}/release/${{ matrix.file }}.sha256sum
release:
name: Release artifacts
needs: [build]
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Download artifacts
uses: actions/download-artifact@v3
- name: Publish release
uses: softprops/action-gh-release@v1
with:
name: "Release ${{ github.ref_name }}"
files: |
**/greptime-${{ github.ref_name }}.*
docker:
name: Build docker image
needs: [build]
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v2
- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
name: greptime-${{ github.ref_name }}.linux-amd64
path: amd64
- name: Rename amd64 binary
run: |
mv amd64/greptime-${{ github.ref_name }}.linux-amd64 amd64/greptime
- name: Download arm64 binary
uses: actions/download-artifact@v3
with:
name: greptime-${{ github.ref_name }}.linux-arm64
path: arm64
- name: Rename arm64 binary
run: |
mv arm64/greptime-${{ github.ref_name }}.linux-arm64 arm64/greptime
- name: Set file permissions
shell: bash
run: |
chmod +x amd64/greptime arm64/greptime
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up buildx
uses: docker/setup-buildx-action@v2
- name: Build and push
uses: docker/build-push-action@v3
with:
context: .
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64,linux/arm64
tags: |
ghcr.io/greptimeteam/greptimedb:${{ github.ref_name }}
greptime/greptimedb:${{ github.ref_name }}

6
.gitignore vendored
View File

@@ -19,12 +19,12 @@ debug/
# JetBrains IDE config directory
.idea/
# VSCode IDE config directory
.vscode/
# Logs
**/__unittest_logs
logs/
.DS_store
.gitignore
# cpython's generated python byte code
**/__pycache__/

21
.pre-commit-config.yaml Normal file
View File

@@ -0,0 +1,21 @@
repos:
- repo: https://github.com/compilerla/conventional-pre-commit
rev: 47923ce11be4a936cd216d427d985dd342adb751
hooks:
- id: conventional-pre-commit
stages: [commit-msg]
- repo: https://github.com/DevinR528/cargo-sort
rev: e6a795bc6b2c0958f9ef52af4863bbd7cc17238f
hooks:
- id: cargo-sort
args: ["--workspace"]
- repo: https://github.com/doublify/pre-commit-rust
rev: v1.0
hooks:
- id: fmt
- id: clippy
args: ["--workspace", "--all-targets", "--", "-D", "warnings", "-D", "clippy::print_stdout", "-D", "clippy::print_stderr"]
stages: [push]
- id: cargo-check

132
CODE_OF_CONDUCT.md Normal file
View File

@@ -0,0 +1,132 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or advances of
any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
info@greptime.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations

View File

@@ -10,6 +10,34 @@ To learn about the design of GreptimeDB, please refer to the [design docs](https
- Make sure all unit tests pass.
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr`).
#### `pre-commit` Hooks
You can set up the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run these checks automatically on every commit.
1. Install `pre-commit`
```
$ pip install pre-commit
```
or
```
$ brew install pre-commit
```
2. Install the `pre-commit` hooks
```
$ pre-commit install
pre-commit installed at .git/hooks/pre-commit
$ pre-commit install --hook-type commit-msg
pre-commit installed at .git/hooks/commit-msg
$ pre-commit install --hook-type pre-push
pre-commit installed at .git/hooks/pre-push
```
Now `pre-commit` will run automatically on `git commit`.
### Title
The titles of pull requests should be prefixed with a category name listed in the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0)
@@ -32,4 +60,4 @@ of what you were trying to do and what went wrong. You can also reach for help i
## Bug report
To report a bug or a security issue, you can [open a new GitHub issue](https://github.com/GrepTimeTeam/greptimedb/issues/new).
To report a bug or a security issue, you can [open a new GitHub issue](https://github.com/GrepTimeTeam/greptimedb/issues/new).

2047
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -3,20 +3,25 @@ members = [
"src/api",
"src/catalog",
"src/client",
"src/cmd",
"src/common/base",
"src/common/catalog",
"src/common/error",
"src/common/function",
"src/common/function-macro",
"src/common/grpc",
"src/common/query",
"src/common/recordbatch",
"src/common/runtime",
"src/common/substrait",
"src/common/telemetry",
"src/common/time",
"src/cmd",
"src/datanode",
"src/datatypes",
"src/frontend",
"src/log-store",
"src/logical-plans",
"src/meta-client",
"src/meta-srv",
"src/object-store",
"src/query",
"src/script",
@@ -26,5 +31,4 @@ members = [
"src/store-api",
"src/table",
"src/table-engine",
"test-util",
]

View File

@@ -11,7 +11,6 @@ GreptimeDB: the next-generation hybrid timeseries/analytics processing database
To compile GreptimeDB from source, you'll need the following:
- Rust
- Protobuf
- OpenSSL
#### Rust
@@ -23,23 +22,6 @@ The easiest way to install Rust is to use [`rustup`](https://rustup.rs/), which
major package manager on macOS and Linux distributions. You can find
installation instructions [here](https://grpc.io/docs/protoc-installation/).
#### OpenSSL
For Ubuntu:
```bash
sudo apt install libssl-dev
```
For RedHat-based: Fedora, Oracle Linux, etc:
```bash
sudo dnf install openssl-devel
```
For macOS:
```bash
brew install openssl
```
### Build the Docker Image
```
@@ -80,6 +62,33 @@ docker run -p 3000:3000 \
greptimedb
```
### Start Frontend
Frontend connects to Datanode, so **Datanode must be started first**!
```
// Connects to local Datanode at its default GRPC port: 3001
// Start Frontend with default options.
cargo run -- frontend start
OR
// Start Frontend with `mysql-addr` option.
cargo run -- frontend start --mysql-addr=0.0.0.0:9999
OR
// Start Frontend with `log-dir` and `log-level` options.
cargo run -- --log-dir=logs --log-level=debug frontend start
```
Start Frontend with a config file:
```
cargo run -- --log-dir=logs --log-level=debug frontend start -c ./config/frontend.example.toml
```
### SQL Operations
1. Connect to the DB with the [mysql client](https://dev.mysql.com/downloads/mysql/):
@@ -94,11 +103,11 @@ greptimedb
```SQL
CREATE TABLE monitor (
host STRING,
ts BIGINT,
ts TIMESTAMP,
cpu DOUBLE DEFAULT 0,
memory DOUBLE,
TIME INDEX (ts),
PRIMARY KEY(ts,host)) ENGINE=mito WITH(regions=1);
PRIMARY KEY(host)) ENGINE=mito WITH(regions=1);
```
3. Insert data:
@@ -123,3 +132,7 @@ greptimedb
3 rows in set (0.01 sec)
```
You can delete your data by removing `/tmp/greptimedb`.
## Contributing
Please refer to [contribution guidelines](CONTRIBUTING.md) for more information.

View File

@@ -44,13 +44,15 @@ def as_table(kline: list):
"rv_60d",
"rv_90d",
"rv_180d"
],
sql="select open_time, close from k_line")
])
def calc_rvs(open_time, close):
from greptime import vector, log, prev, sqrt, datetime, pow, sum
from greptime import vector, log, prev, sqrt, datetime, pow, sum, last
import greptime as g
def calc_rv(close, open_time, time, interval):
mask = (open_time < time) & (open_time > time - interval)
close = close[mask]
open_time = open_time[mask]
close = g.interval(open_time, close, datetime("10m"), lambda x:last(x))
avg_time_interval = (open_time[-1] - open_time[0])/(len(open_time)-1)
ref = log(close/prev(close))
@@ -60,10 +62,10 @@ def calc_rvs(open_time, close):
# how to get env var,
# maybe through accessing scope and serde then send to remote?
timepoint = open_time[-1]
rv_7d = calc_rv(close, open_time, timepoint, datetime("7d"))
rv_15d = calc_rv(close, open_time, timepoint, datetime("15d"))
rv_30d = calc_rv(close, open_time, timepoint, datetime("30d"))
rv_60d = calc_rv(close, open_time, timepoint, datetime("60d"))
rv_90d = calc_rv(close, open_time, timepoint, datetime("90d"))
rv_180d = calc_rv(close, open_time, timepoint, datetime("180d"))
rv_7d = vector([calc_rv(close, open_time, timepoint, datetime("7d"))])
rv_15d = vector([calc_rv(close, open_time, timepoint, datetime("15d"))])
rv_30d = vector([calc_rv(close, open_time, timepoint, datetime("30d"))])
rv_60d = vector([calc_rv(close, open_time, timepoint, datetime("60d"))])
rv_90d = vector([calc_rv(close, open_time, timepoint, datetime("90d"))])
rv_180d = vector([calc_rv(close, open_time, timepoint, datetime("180d"))])
return rv_7d, rv_15d, rv_30d, rv_60d, rv_90d, rv_180d

View File

@@ -7,7 +7,7 @@
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 1581231300,
"open_time": 300,
"open": "10107",
"high": "10109.34",
"low": "10106.71",
@@ -16,7 +16,7 @@
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 1581231360,
"open_time": 900,
"open": "10106.79",
"high": "10109.27",
"low": "10105.92",
@@ -25,7 +25,7 @@
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 1581231420,
"open_time": 1200,
"open": "10106.09",
"high": "10108.75",
"low": "10104.66",
@@ -34,7 +34,7 @@
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 1581231480,
"open_time": 1800,
"open": "10108.73",
"high": "10109.52",
"low": "10106.07",
@@ -43,7 +43,7 @@
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 1581231540,
"open_time": 2400,
"open": "10106.38",
"high": "10109.48",
"low": "10104.81",
@@ -52,7 +52,7 @@
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 1581231600,
"open_time": 3000,
"open": "10106.95",
"high": "10109.48",
"low": "10106.6",
@@ -61,7 +61,7 @@
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 1581231660,
"open_time": 3600,
"open": "10107.55",
"high": "10109.28",
"low": "10104.68",
@@ -70,7 +70,7 @@
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 1581231720,
"open_time": 4200,
"open": "10104.68",
"high": "10109.18",
"low": "10104.14",
@@ -79,7 +79,7 @@
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 1581231780,
"open_time": 4800,
"open": "10108.8",
"high": "10117.36",
"low": "10108.8",
@@ -88,7 +88,7 @@
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 1581231840,
"open_time": 5400,
"open": "10115.96",
"high": "10119.19",
"low": "10115.96",
@@ -97,7 +97,7 @@
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 1581231900,
"open_time": 6000,
"open": "10117.08",
"high": "10120.73",
"low": "10116.96",

View File

@@ -1,4 +1,4 @@
from .greptime import coprocessor, copr
from .greptime import vector, log, prev, sqrt, pow, datetime, sum
from .greptime import vector, log, prev, next, first, last, sqrt, pow, datetime, sum, interval
from .mock import mock_tester
from .cfg import set_conn_addr, get_conn_addr

View File

@@ -89,6 +89,11 @@ class vector(np.ndarray):
def filter(self, lst_bool):
return self[lst_bool]
def last(lst):
return lst[-1]
def first(lst):
return lst[0]
def prev(lst):
ret = np.zeros(len(lst))
@@ -96,35 +101,22 @@ def prev(lst):
ret[0] = nan
return ret
def next(lst):
ret = np.zeros(len(lst))
ret[:-1] = lst[1:]
ret[-1] = nan
return ret
def query(sql: str):
pass
def interval(arr: list, duration: int, fill, step: None | int = None, explicitOffset=False):
def interval(ts: vector, arr: vector, duration: int, func):
"""
Note that this is a mock function with the same functionality as the actual Python Coprocessor
`arr` is a vector of integral or temporal type.
`duration` is the length of sliding window
`step` being the length when sliding window take a step
`fill` indicate how to fill missing value:
- "prev": use previous
- "post": next
- "linear": linear interpolation, if not possible to interpolate certain types, fallback to prev
- "null": use null
- "none": do not interpolate
"""
if step is None:
step = duration
tot_len = int(np.ceil(len(arr) // step))
slices = np.zeros((tot_len, int(duration)))
for idx, start in enumerate(range(0, len(arr), step)):
slices[idx] = arr[start:(start + duration)]
return slices
start = np.min(ts)
end = np.max(ts)
masks = [(ts >= i) & (ts <= (i+duration)) for i in range(start, end, duration)]
lst_res = [func(arr[mask]) for mask in masks]
return lst_res
def factor(unit: str) -> int:

View File

@@ -4,7 +4,7 @@ it can only run on mock data and support by numpy
"""
from typing import Any
import numpy as np
from .greptime import i32,i64,f32,f64, vector, interval, query, prev, datetime, log, sum, sqrt, pow, nan, copr, coprocessor
from .greptime import i32,i64,f32,f64, vector, interval, prev, datetime, log, sum, sqrt, pow, nan, copr, coprocessor
import inspect
import functools

View File

@@ -26,6 +26,16 @@ def get_db(req:str):
return requests.get("http://{}{}".format(get_conn_addr(), req))
if __name__ == "__main__":
with open("component/script/python/example/kline.json", "r") as kline_file:
kline = json.load(kline_file)
table = as_table(kline["result"])
close = table["close"]
open_time = table["open_time"]
env = {"close":close, "open_time": open_time}
res = mock_tester(calc_rvs, env=env)
print("Mock result:", [i[0] for i in res])
exit()
if len(sys.argv)!=2:
raise Exception("Expect only one address as cmd's args")
set_conn_addr(sys.argv[1])
@@ -42,11 +52,6 @@ if __name__ == "__main__":
open_time = table["open_time"]
init_table(close, open_time)
# print(repr(close), repr(open_time))
# print("calc_rv:", calc_rv(close, open_time, open_time[-1]+datetime("10m"), datetime("7d")))
env = {"close":close, "open_time": open_time}
# print("env:", env)
print("Mock result:", mock_tester(calc_rvs, env=env))
real = calc_rvs()
print(real)
try:

View File

@@ -1,10 +1,15 @@
http_addr = '0.0.0.0:3000'
rpc_addr = '0.0.0.0:3001'
wal_dir = '/tmp/greptimedb/wal'
rpc_runtime_size = 8
mysql_addr = '0.0.0.0:3306'
mysql_runtime_size = 4
# applied when the postgres feature is enabled
postgres_addr = '0.0.0.0:5432'
postgres_runtime_size = 4
[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'

View File

@@ -0,0 +1,4 @@
http_addr = '0.0.0.0:4000'
grpc_addr = '0.0.0.0:4001'
mysql_addr = '0.0.0.0:4003'
mysql_runtime_size = 4

View File

@@ -0,0 +1,4 @@
bind_addr = '127.0.0.1:3002'
server_addr = '0.0.0.0:3002'
store_addr = '127.0.0.1:2380'
datanode_lease_secs = 30

View File

@@ -24,9 +24,8 @@ RUN cargo build --release
# TODO(zyy17): Maybe we should use a more secure container image.
FROM ubuntu:22.04 as base
WORKDIR /greptimedb
COPY --from=builder /greptimedb/target/release/greptime /greptimedb/bin/
ENV PATH /greptimedb/bin/:$PATH
WORKDIR /greptime
COPY --from=builder /greptimedb/target/release/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENTRYPOINT [ "greptime" ]
CMD [ "datanode", "start"]
ENTRYPOINT ["greptime"]

9
docker/ci/Dockerfile Normal file
View File

@@ -0,0 +1,9 @@
FROM ubuntu:22.04
ARG TARGETARCH
ADD $TARGETARCH/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENTRYPOINT ["greptime"]

View File

@@ -8,27 +8,29 @@ So is there a way we can make an aggregate function that automatically match the
# 1. Impl `AggregateFunctionCreator` trait for your accumulator creator.
You must first define a struct that can store the input data's type. For example,
You must first define a struct that will be used to create your accumulator. For example,
```Rust
struct MySumAccumulatorCreator {
input_types: ArcSwapOption<Vec<ConcreteDataType>>,
}
#[as_aggr_func_creator]
#[derive(Debug, AggrFuncTypeStore)]
struct MySumAccumulatorCreator {}
```
The attribute macro `#[as_aggr_func_creator]` and the derive macro `#[derive(Debug, AggrFuncTypeStore)]` must both be annotated on the struct. Together they provide storage for the aggregate function's input data types, which is needed when creating the generic accumulator later.
> Note that the `as_aggr_func_creator` macro adds fields to the struct, so the struct cannot be defined as a fieldless unit struct like `struct Foo;`, nor as a newtype like `struct Foo(bar)`.
Then impl `AggregateFunctionCreator` trait on it. The definition of the trait is:
```Rust
pub trait AggregateFunctionCreator: Send + Sync + Debug {
fn creator(&self) -> AccumulatorCreatorFunction;
fn input_types(&self) -> Vec<ConcreteDataType>;
fn set_input_types(&self, input_types: Vec<ConcreteDataType>);
fn output_type(&self) -> ConcreteDataType;
fn state_types(&self) -> Vec<ConcreteDataType>;
}
```
our query engine will call `set_input_types` the very first, so you can use input data's type in methods that return output type and state types.
You can use the input data's types in the methods that return the output type and state types (just invoke `input_types()`).
The output type is the data type of the aggregate function's output. For example, the `SUM` aggregate function's output type is `u64` for a `u32` column. The state types are the types of the accumulator's internal states. Take the `AVG` aggregate function on an `i32` column as an example: its state types are `i64` (for the sum) and `u64` (for the count).
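To tie these pieces together, here is a minimal sketch of what an implementation could look like. It is illustrative only: it assumes the two macros above supply the input-type storage behind `input_types()`/`set_input_types()`, and since the exact signature behind `AccumulatorCreatorFunction` is not shown in this document, `creator` is left as a stub.
```Rust
#[as_aggr_func_creator]
#[derive(Debug, AggrFuncTypeStore)]
struct MySumAccumulatorCreator {}

impl AggregateFunctionCreator for MySumAccumulatorCreator {
    fn creator(&self) -> AccumulatorCreatorFunction {
        // Return the function that builds the accumulator from the input
        // types; its shape depends on `AccumulatorCreatorFunction`, so this
        // sketch leaves it unimplemented.
        todo!()
    }

    fn output_type(&self) -> ConcreteDataType {
        // For this SUM sketch we assume an unsigned integer input and always
        // widen the sum to u64 (a u32 column sums into u64, as noted above).
        ConcreteDataType::uint64_datatype()
    }

    fn state_types(&self) -> Vec<ConcreteDataType> {
        // The running total is the only internal state SUM needs. If the state
        // depended on the argument type, `input_types()` could be consulted here.
        vec![self.output_type()]
    }
}
```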

View File

@@ -2,11 +2,12 @@
name = "api"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
datatypes = { path = "../datatypes" }
prost = "0.11"
snafu = { version = "0.7", features = ["backtraces"] }
tonic = "0.8"
[build-dependencies]

View File

@@ -6,6 +6,11 @@ fn main() {
"greptime/v1/select.proto",
"greptime/v1/physical_plan.proto",
"greptime/v1/greptime.proto",
"greptime/v1/meta/common.proto",
"greptime/v1/meta/heartbeat.proto",
"greptime/v1/meta/route.proto",
"greptime/v1/meta/store.proto",
"prometheus/remote/remote.proto",
],
&["."],
)

View File

@@ -2,8 +2,54 @@ syntax = "proto3";
package greptime.v1;
// TODO(jiachun)
message AdminRequest {}
import "greptime/v1/column.proto";
import "greptime/v1/common.proto";
// TODO(jiachun)
message AdminResponse {}
message AdminRequest {
string name = 1;
repeated AdminExpr exprs = 2;
}
message AdminResponse {
repeated AdminResult results = 1;
}
message AdminExpr {
ExprHeader header = 1;
oneof expr {
CreateExpr create = 2;
AlterExpr alter = 3;
}
}
message AdminResult {
ResultHeader header = 1;
oneof result {
MutateResult mutate = 2;
}
}
message CreateExpr {
optional string catalog_name = 1;
optional string schema_name = 2;
string table_name = 3;
optional string desc = 4;
repeated ColumnDef column_defs = 5;
string time_index = 6;
repeated string primary_keys = 7;
bool create_if_not_exists = 8;
map<string, string> table_options = 9;
}
message AlterExpr {
optional string catalog_name = 1;
optional string schema_name = 2;
string table_name = 3;
oneof kind {
AddColumn add_column = 4;
}
}
message AddColumn {
ColumnDef column_def = 1;
}

View File

@@ -29,6 +29,10 @@ message Column {
repeated bool bool_values = 11;
repeated bytes binary_values = 12;
repeated string string_values = 13;
repeated int32 date_values = 14;
repeated int64 datetime_values = 15;
repeated int64 ts_millis_values = 16;
}
// The array of non-null values in this column.
//
@@ -43,4 +47,33 @@ message Column {
// Mask maps the positions of null values.
// If a bit in null_mask is 1, it indicates that the column value at that position is null.
bytes null_mask = 4;
// Helpful when creating a vector from the column.
ColumnDataType datatype = 5;
}
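As a non-normative illustration of the `null_mask` encoding described above, the sketch below builds the mask bytes on the client side; it assumes a least-significant-bit-first layout within each byte, which this diff does not spell out.
```Rust
/// Builds `null_mask` bytes for a column of optional row values.
/// Bit i is set to 1 when row i is null (LSB-first within each byte is an
/// assumption of this sketch, not a documented guarantee).
fn build_null_mask<T>(rows: &[Option<T>]) -> Vec<u8> {
    let mut mask = vec![0u8; (rows.len() + 7) / 8];
    for (i, row) in rows.iter().enumerate() {
        if row.is_none() {
            mask[i / 8] |= 1 << (i % 8);
        }
    }
    mask
}

// Example: [Some(1), None, Some(3), None] yields the single byte 0b0000_1010.
```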
message ColumnDef {
string name = 1;
ColumnDataType datatype = 2;
bool is_nullable = 3;
optional bytes default_constraint = 4;
}
enum ColumnDataType {
BOOLEAN = 0;
INT8 = 1;
INT16 = 2;
INT32 = 3;
INT64 = 4;
UINT8 = 5;
UINT16 = 6;
UINT32 = 7;
UINT64 = 8;
FLOAT32 = 9;
FLOAT64 = 10;
BINARY = 11;
STRING = 12;
DATE = 13;
DATETIME = 14;
TIMESTAMP = 15;
}

View File

@@ -0,0 +1,22 @@
syntax = "proto3";
package greptime.v1;
message RequestHeader {
string tenant = 1;
}
message ExprHeader {
uint32 version = 1;
}
message ResultHeader {
uint32 version = 1;
uint32 code = 2;
string err_msg = 3;
}
message MutateResult {
uint32 success = 1;
uint32 failure = 2;
}

View File

@@ -2,6 +2,8 @@ syntax = "proto3";
package greptime.v1;
import "greptime/v1/common.proto";
message DatabaseRequest {
string name = 1;
repeated ObjectExpr exprs = 2;
@@ -21,14 +23,11 @@ message ObjectExpr {
}
}
message ExprHeader {
uint32 version = 1;
}
// TODO(fys): Only SQL is supported now; promql etc. will be supported in the future
message SelectExpr {
oneof expr {
string sql = 1;
bytes logical_plan = 2;
PhysicalPlan physical_plan = 15;
}
}
@@ -40,7 +39,25 @@ message PhysicalPlan {
message InsertExpr {
string table_name = 1;
repeated bytes values = 2;
message Values {
repeated bytes values = 1;
}
oneof expr {
Values values = 2;
// TODO(LFC): Remove field "sql" in InsertExpr.
// When the Frontend instance receives an insertion SQL (`insert into ...`), it is expected to parse the SQL and
// assemble the values to feed the Datanode. In other words, inserting data through the Datanode instance's GRPC
// interface shouldn't use SQL directly.
// Then why does the "sql" field exist here? It's because the Frontend needs the table schema to create the values to insert,
// which currently cannot be found anywhere. (Maybe the table schema is supposed to be fetched from Meta?)
// The "sql" field is meant to be removed in the future.
string sql = 3;
}
map<string, bytes> options = 4;
}
// TODO(jiachun)
@@ -59,14 +76,3 @@ message ObjectResult {
message SelectResult {
bytes raw_data = 1;
}
message ResultHeader {
uint32 version = 1;
uint32 code = 2;
string err_msg = 3;
}
message MutateResult {
uint32 success = 1;
uint32 failure = 2;
}

View File

@@ -3,6 +3,7 @@ syntax = "proto3";
package greptime.v1;
import "greptime/v1/admin.proto";
import "greptime/v1/common.proto";
import "greptime/v1/database.proto";
service Greptime {
@@ -10,8 +11,9 @@ service Greptime {
}
message BatchRequest {
repeated AdminRequest admins = 1;
repeated DatabaseRequest databases = 2;
RequestHeader header = 1;
repeated AdminRequest admins = 2;
repeated DatabaseRequest databases = 3;
}
message BatchResponse {

View File

@@ -0,0 +1,48 @@
syntax = "proto3";
package greptime.v1.meta;
message RequestHeader {
uint64 protocol_version = 1;
// cluster_id is the ID of the cluster the request is sent to.
uint64 cluster_id = 2;
// member_id is the ID of the sender server.
uint64 member_id = 3;
}
message ResponseHeader {
uint64 protocol_version = 1;
// cluster_id is the ID of the cluster which sent the response.
uint64 cluster_id = 2;
Error error = 3;
}
message Error {
int32 code = 1;
string err_msg = 2;
}
message Peer {
uint64 id = 1;
string addr = 2;
}
message TableName {
string catalog_name = 1;
string schema_name = 2;
string table_name = 3;
}
message TimeInterval {
// The unix timestamp in millis of the start of this period.
uint64 start_timestamp_millis = 1;
// The unix timestamp in millis of the end of this period.
uint64 end_timestamp_millis = 2;
}
message KeyValue {
// key is the key in bytes. An empty key is not allowed.
bytes key = 1;
// value is the value held by the key, in bytes.
bytes value = 2;
}

View File

@@ -0,0 +1,92 @@
syntax = "proto3";
package greptime.v1.meta;
import "greptime/v1/meta/common.proto";
service Heartbeat {
// Heartbeat; a heartbeat may carry many kinds of content, such as:
// 1. Metadata to be registered to meta server and discoverable by other nodes.
// 2. Some performance metrics, such as Load, CPU usage, etc.
// 3. The number of computing tasks being executed.
rpc Heartbeat(stream HeartbeatRequest) returns (stream HeartbeatResponse) {}
// Ask for the leader's endpoint.
rpc AskLeader(AskLeaderRequest) returns (AskLeaderResponse) {}
}
message HeartbeatRequest {
RequestHeader header = 1;
// Self peer
Peer peer = 2;
// Leader node
bool is_leader = 3;
// Actually reported time interval
TimeInterval report_interval = 4;
// Node stat
NodeStat node_stat = 5;
// Region stats in this node
repeated RegionStat region_stats = 6;
// Follower nodes and stats, empty on follower nodes
repeated ReplicaStat replica_stats = 7;
}
message NodeStat {
// The read capacity units during this period
uint64 rcus = 1;
// The write capacity units during this period
uint64 wcus = 2;
// Table number in this node
uint64 table_num = 3;
// Region number in this node
uint64 region_num = 4;
double cpu_usage = 5;
double load = 6;
// Read disk I/O in the node
double read_io_rate = 7;
// Write disk I/O in the node
double write_io_rate = 8;
// Others
map<string, string> attrs = 100;
}
message RegionStat {
uint64 region_id = 1;
TableName table_name = 2;
// The read capacity units during this period
uint64 rcus = 3;
// The write capacity units during this period
uint64 wcus = 4;
// Approximate region size
uint64 approximate_size = 5;
// Approximate number of rows
uint64 approximate_rows = 6;
// Others
map<string, string> attrs = 100;
}
message ReplicaStat {
Peer peer = 1;
bool in_sync = 2;
bool is_learner = 3;
}
message HeartbeatResponse {
ResponseHeader header = 1;
repeated bytes payload = 2;
}
message AskLeaderRequest {
RequestHeader header = 1;
}
message AskLeaderResponse {
ResponseHeader header = 1;
Peer leader = 2;
}

View File

@@ -0,0 +1,82 @@
syntax = "proto3";
package greptime.v1.meta;
import "greptime/v1/meta/common.proto";
service Router {
// Fetch routing information for tables. The smallest unit is the complete
// routing information (all regions) of a table.
//
// ```text
// table_1
// table_name
// table_schema
// regions
// region_1
// leader_peer
// follower_peer_1, follower_peer_2
// region_2
// leader_peer
// follower_peer_1, follower_peer_2, follower_peer_3
// region_xxx
// table_2
// ...
// ```
//
rpc Route(RouteRequest) returns (RouteResponse) {}
rpc Create(CreateRequest) returns (RouteResponse) {}
}
message RouteRequest {
RequestHeader header = 1;
repeated TableName table_names = 2;
}
message RouteResponse {
ResponseHeader header = 1;
repeated Peer peers = 2;
repeated TableRoute table_routes = 3;
}
message CreateRequest {
RequestHeader header = 1;
TableName table_name = 2;
repeated Partition partitions = 3;
}
message TableRoute {
Table table = 1;
repeated RegionRoute region_routes = 2;
}
message RegionRoute {
Region region = 1;
// single leader node for write task
uint64 leader_peer_index = 2;
// multiple follower nodes for read task
repeated uint64 follower_peer_indexes = 3;
}
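For illustration only (not part of this diff): given the index-based design above, a client could resolve each region's leader by indexing into `RouteResponse.peers`, assuming the prost-generated Rust types for these messages.
```Rust
// Collects the leader peer of every region in a table route. Out-of-range
// indexes are silently skipped in this sketch.
fn region_leaders(table_route: &TableRoute, peers: &[Peer]) -> Vec<Peer> {
    table_route
        .region_routes
        .iter()
        .filter_map(|route| peers.get(route.leader_peer_index as usize).cloned())
        .collect()
}
```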
message Table {
TableName table_name = 1;
bytes table_schema = 2;
}
message Region {
uint64 id = 1;
string name = 2;
Partition partition = 3;
map<string, string> attrs = 100;
}
// PARTITION `region_name` VALUES LESS THAN (value_list)
message Partition {
repeated bytes column_list = 1;
repeated bytes value_list = 2;
}

View File

@@ -0,0 +1,138 @@
syntax = "proto3";
package greptime.v1.meta;
import "greptime/v1/meta/common.proto";
service Store {
// Range gets the keys in the range from the key-value store.
rpc Range(RangeRequest) returns (RangeResponse);
// Put puts the given key into the key-value store.
rpc Put(PutRequest) returns (PutResponse);
// BatchPut atomically puts the given keys into the key-value store.
rpc BatchPut(BatchPutRequest) returns (BatchPutResponse);
// CompareAndPut atomically puts the value to the given updated
// value if the current value == the expected value.
rpc CompareAndPut(CompareAndPutRequest) returns (CompareAndPutResponse);
// DeleteRange deletes the given range from the key-value store.
rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse);
}
message RangeRequest {
RequestHeader header = 1;
// key is the first key for the range. If range_end is not given, the
// request only looks up key.
bytes key = 2;
// range_end is the upper bound on the requested range [key, range_end).
// If range_end is '\0', the range is all keys >= key.
// If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
// then the range request gets all keys prefixed with key.
// If both key and range_end are '\0', then the range request returns all
// keys.
bytes range_end = 3;
// limit is a limit on the number of keys returned for the request. When
// limit is set to 0, it is treated as no limit.
int64 limit = 4;
// keys_only when set returns only the keys and not the values.
bool keys_only = 5;
}
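The "key plus one" prefix semantics of `range_end` described above can be computed as in the following sketch, which is illustrative and not taken from the GreptimeDB sources.
```Rust
/// Returns the `range_end` for a prefix scan: the key "plus one"
/// (e.g. "aa" + 1 == "ab", "a\xff" + 1 == "b"). When every byte is 0xff,
/// falls back to "\0", i.e. scan to the end of the keyspace.
fn prefix_range_end(key: &[u8]) -> Vec<u8> {
    let mut end = key.to_vec();
    while let Some(last) = end.pop() {
        if last < 0xff {
            end.push(last + 1);
            return end;
        }
        // A trailing 0xff is dropped and the carry moves to the previous byte.
    }
    vec![0]
}
```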
message RangeResponse {
ResponseHeader header = 1;
// kvs is the list of key-value pairs matched by the range request.
repeated KeyValue kvs = 2;
// more indicates if there are more keys to return in the requested range.
bool more = 3;
}
message PutRequest {
RequestHeader header = 1;
// key is the key, in bytes, to put into the key-value store.
bytes key = 2;
// value is the value, in bytes, to associate with the key in the
// key-value store.
bytes value = 3;
// If prev_kv is set, gets the previous key-value pair before changing it.
// The previous key-value pair will be returned in the put response.
bool prev_kv = 4;
}
message PutResponse {
ResponseHeader header = 1;
// If prev_kv is set in the request, the previous key-value pair will be
// returned.
KeyValue prev_kv = 2;
}
message BatchPutRequest {
RequestHeader header = 1;
repeated KeyValue kvs = 2;
// If prev_kv is set, gets the previous key-value pairs before changing it.
// The previous key-value pairs will be returned in the batch put response.
bool prev_kv = 3;
}
message BatchPutResponse {
ResponseHeader header = 1;
// If prev_kv is set in the request, the previous key-value pairs will be
// returned.
repeated KeyValue prev_kvs = 2;
}
message CompareAndPutRequest {
RequestHeader header = 1;
// key is the key, in bytes, to put into the key-value store.
bytes key = 2;
// expect is the previous value, in bytes
bytes expect = 3;
// value is the value, in bytes, to associate with the key in the
// key-value store.
bytes value = 4;
}
message CompareAndPutResponse {
ResponseHeader header = 1;
bool success = 2;
KeyValue prev_kv = 3;
}
message DeleteRangeRequest {
RequestHeader header = 1;
// key is the first key to delete in the range.
bytes key = 2;
// range_end is the key following the last key to delete for the range
// [key, range_end).
// If range_end is not given, the range is defined to contain only the key
// argument.
// If range_end is one bit larger than the given key, then the range is all
// the keys with the prefix (the given key).
// If range_end is '\0', the range is all keys greater than or equal to the
// key argument.
bytes range_end = 3;
// If prev_kv is set, gets the previous key-value pairs before deleting it.
// The previous key-value pairs will be returned in the delete response.
bool prev_kv = 4;
}
message DeleteRangeResponse {
ResponseHeader header = 1;
// deleted is the number of keys deleted by the delete range request.
int64 deleted = 2;
// If prev_kv is set in the request, the previous key-value pairs will be
// returned.
repeated KeyValue prev_kvs = 3;
}

View File

@@ -0,0 +1,85 @@
// Copyright 2016 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package prometheus;
option go_package = "prompb";
import "prometheus/remote/types.proto";
message WriteRequest {
repeated prometheus.TimeSeries timeseries = 1;
// Cortex uses this field to determine the source of the write request.
// We reserve it to avoid any compatibility issues.
reserved 2;
repeated prometheus.MetricMetadata metadata = 3;
}
// ReadRequest represents a remote read request.
message ReadRequest {
repeated Query queries = 1;
enum ResponseType {
// Server will return a single ReadResponse message with matched series that includes list of raw samples.
// It's recommended to use streamed response types instead.
//
// Response headers:
// Content-Type: "application/x-protobuf"
// Content-Encoding: "snappy"
SAMPLES = 0;
// Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series.
// Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum.
//
// Response headers:
// Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
// Content-Encoding: ""
STREAMED_XOR_CHUNKS = 1;
}
// accepted_response_types allows negotiating the content type of the response.
//
// Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
// implemented by server, error is returned.
// For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
repeated ResponseType accepted_response_types = 2;
}
// ReadResponse is a response when response_type equals SAMPLES.
message ReadResponse {
// In same order as the request's queries.
repeated QueryResult results = 1;
}
message Query {
int64 start_timestamp_ms = 1;
int64 end_timestamp_ms = 2;
repeated prometheus.LabelMatcher matchers = 3;
prometheus.ReadHints hints = 4;
}
message QueryResult {
// Samples within a time series must be ordered by time.
repeated prometheus.TimeSeries timeseries = 1;
}
// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS.
// We strictly stream full series after series, optionally split by time. This means that a single frame can contain
// partition of the single series, but once a new series is started to be streamed it means that no more chunks will
// be sent for previous one. Series are returned sorted in the same way TSDB block are internally.
message ChunkedReadResponse {
repeated prometheus.ChunkedSeries chunked_series = 1;
// query_index represents an index of the query from ReadRequest.queries these chunks relates to.
int64 query_index = 2;
}

View File

@@ -0,0 +1,117 @@
// Copyright 2017 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package prometheus;
option go_package = "prompb";
message MetricMetadata {
enum MetricType {
UNKNOWN = 0;
COUNTER = 1;
GAUGE = 2;
HISTOGRAM = 3;
GAUGEHISTOGRAM = 4;
SUMMARY = 5;
INFO = 6;
STATESET = 7;
}
// Represents the metric type, these match the set from Prometheus.
// Refer to model/textparse/interface.go for details.
MetricType type = 1;
string metric_family_name = 2;
string help = 4;
string unit = 5;
}
message Sample {
double value = 1;
// timestamp is in ms format, see model/timestamp/timestamp.go for
// conversion from time.Time to Prometheus timestamp.
int64 timestamp = 2;
}
message Exemplar {
// Optional, can be empty.
repeated Label labels = 1;
double value = 2;
// timestamp is in ms format, see model/timestamp/timestamp.go for
// conversion from time.Time to Prometheus timestamp.
int64 timestamp = 3;
}
// TimeSeries represents samples and labels for a single time series.
message TimeSeries {
// For a timeseries to be valid, and for the samples and exemplars
// to be ingested by the remote system properly, the labels field is required.
repeated Label labels = 1;
repeated Sample samples = 2;
repeated Exemplar exemplars = 3;
}
message Label {
string name = 1;
string value = 2;
}
message Labels {
repeated Label labels = 1;
}
// Matcher specifies a rule, which can match or set of labels or not.
message LabelMatcher {
enum Type {
EQ = 0;
NEQ = 1;
RE = 2;
NRE = 3;
}
Type type = 1;
string name = 2;
string value = 3;
}
message ReadHints {
int64 step_ms = 1; // Query step size in milliseconds.
string func = 2; // String representation of surrounding function or aggregation.
int64 start_ms = 3; // Start time in milliseconds.
int64 end_ms = 4; // End time in milliseconds.
repeated string grouping = 5; // List of label names used in aggregation.
bool by = 6; // Indicate whether it is without or by.
int64 range_ms = 7; // Range vector selector range in milliseconds.
}
// Chunk represents a TSDB chunk.
// Time range [min, max] is inclusive.
message Chunk {
int64 min_time_ms = 1;
int64 max_time_ms = 2;
// We require this to match chunkenc.Encoding.
enum Encoding {
UNKNOWN = 0;
XOR = 1;
}
Encoding type = 3;
bytes data = 4;
}
// ChunkedSeries represents single, encoded time series.
message ChunkedSeries {
// Labels should be sorted.
repeated Label labels = 1;
// Chunks will be in start time order and may overlap.
repeated Chunk chunks = 2;
}

18
src/api/src/error.rs Normal file
View File

@@ -0,0 +1,18 @@
use datatypes::prelude::ConcreteDataType;
use snafu::prelude::*;
use snafu::Backtrace;
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Unknown proto column datatype: {}", datatype))]
UnknownColumnDataType { datatype: i32, backtrace: Backtrace },
#[snafu(display("Failed to create column datatype from {:?}", from))]
IntoColumnDataType {
from: ConcreteDataType,
backtrace: Backtrace,
},
}

361
src/api/src/helper.rs Normal file
View File

@@ -0,0 +1,361 @@
use datatypes::prelude::ConcreteDataType;
use snafu::prelude::*;
use crate::error::{self, Result};
use crate::v1::column::Values;
use crate::v1::ColumnDataType;
#[derive(Debug, PartialEq, Eq)]
pub struct ColumnDataTypeWrapper(ColumnDataType);
impl ColumnDataTypeWrapper {
pub fn try_new(datatype: i32) -> Result<Self> {
let datatype = ColumnDataType::from_i32(datatype)
.context(error::UnknownColumnDataTypeSnafu { datatype })?;
Ok(Self(datatype))
}
pub fn datatype(&self) -> ColumnDataType {
self.0
}
}
impl From<ColumnDataTypeWrapper> for ConcreteDataType {
fn from(datatype: ColumnDataTypeWrapper) -> Self {
match datatype.0 {
ColumnDataType::Boolean => ConcreteDataType::boolean_datatype(),
ColumnDataType::Int8 => ConcreteDataType::int8_datatype(),
ColumnDataType::Int16 => ConcreteDataType::int16_datatype(),
ColumnDataType::Int32 => ConcreteDataType::int32_datatype(),
ColumnDataType::Int64 => ConcreteDataType::int64_datatype(),
ColumnDataType::Uint8 => ConcreteDataType::uint8_datatype(),
ColumnDataType::Uint16 => ConcreteDataType::uint16_datatype(),
ColumnDataType::Uint32 => ConcreteDataType::uint32_datatype(),
ColumnDataType::Uint64 => ConcreteDataType::uint64_datatype(),
ColumnDataType::Float32 => ConcreteDataType::float32_datatype(),
ColumnDataType::Float64 => ConcreteDataType::float64_datatype(),
ColumnDataType::Binary => ConcreteDataType::binary_datatype(),
ColumnDataType::String => ConcreteDataType::string_datatype(),
ColumnDataType::Date => ConcreteDataType::date_datatype(),
ColumnDataType::Datetime => ConcreteDataType::datetime_datatype(),
ColumnDataType::Timestamp => ConcreteDataType::timestamp_millis_datatype(),
}
}
}
impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
type Error = error::Error;
fn try_from(datatype: ConcreteDataType) -> Result<Self> {
let datatype = ColumnDataTypeWrapper(match datatype {
ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
ConcreteDataType::Int8(_) => ColumnDataType::Int8,
ConcreteDataType::Int16(_) => ColumnDataType::Int16,
ConcreteDataType::Int32(_) => ColumnDataType::Int32,
ConcreteDataType::Int64(_) => ColumnDataType::Int64,
ConcreteDataType::UInt8(_) => ColumnDataType::Uint8,
ConcreteDataType::UInt16(_) => ColumnDataType::Uint16,
ConcreteDataType::UInt32(_) => ColumnDataType::Uint32,
ConcreteDataType::UInt64(_) => ColumnDataType::Uint64,
ConcreteDataType::Float32(_) => ColumnDataType::Float32,
ConcreteDataType::Float64(_) => ColumnDataType::Float64,
ConcreteDataType::Binary(_) => ColumnDataType::Binary,
ConcreteDataType::String(_) => ColumnDataType::String,
ConcreteDataType::Date(_) => ColumnDataType::Date,
ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
ConcreteDataType::Timestamp(_) => ColumnDataType::Timestamp,
ConcreteDataType::Null(_) | ConcreteDataType::List(_) => {
return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
}
});
Ok(datatype)
}
}
impl Values {
pub fn with_capacity(datatype: ColumnDataType, capacity: usize) -> Self {
match datatype {
ColumnDataType::Boolean => Values {
bool_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Int8 => Values {
i8_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Int16 => Values {
i16_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Int32 => Values {
i32_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Int64 => Values {
i64_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Uint8 => Values {
u8_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Uint16 => Values {
u16_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Uint32 => Values {
u32_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Uint64 => Values {
u64_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Float32 => Values {
f32_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Float64 => Values {
f64_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Binary => Values {
binary_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::String => Values {
string_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Date => Values {
date_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Datetime => Values {
datetime_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Timestamp => Values {
ts_millis_values: Vec::with_capacity(capacity),
..Default::default()
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_values_with_capacity() {
let values = Values::with_capacity(ColumnDataType::Int8, 2);
let values = values.i8_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Int32, 2);
let values = values.i32_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Int64, 2);
let values = values.i64_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Uint8, 2);
let values = values.u8_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Uint32, 2);
let values = values.u32_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Uint64, 2);
let values = values.u64_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Float32, 2);
let values = values.f32_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Float64, 2);
let values = values.f64_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Binary, 2);
let values = values.binary_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Boolean, 2);
let values = values.bool_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::String, 2);
let values = values.string_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Date, 2);
let values = values.date_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Datetime, 2);
let values = values.datetime_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Timestamp, 2);
let values = values.ts_millis_values;
assert_eq!(2, values.capacity());
}
#[test]
fn test_concrete_datatype_from_column_datatype() {
assert_eq!(
ConcreteDataType::boolean_datatype(),
ColumnDataTypeWrapper(ColumnDataType::Boolean).into()
);
assert_eq!(
ConcreteDataType::int8_datatype(),
ColumnDataTypeWrapper(ColumnDataType::Int8).into()
);
assert_eq!(
ConcreteDataType::int16_datatype(),
ColumnDataTypeWrapper(ColumnDataType::Int16).into()
);
assert_eq!(
ConcreteDataType::int32_datatype(),
ColumnDataTypeWrapper(ColumnDataType::Int32).into()
);
assert_eq!(
ConcreteDataType::int64_datatype(),
ColumnDataTypeWrapper(ColumnDataType::Int64).into()
);
assert_eq!(
ConcreteDataType::uint8_datatype(),
ColumnDataTypeWrapper(ColumnDataType::Uint8).into()
);
assert_eq!(
ConcreteDataType::uint16_datatype(),
ColumnDataTypeWrapper(ColumnDataType::Uint16).into()
);
assert_eq!(
ConcreteDataType::uint32_datatype(),
ColumnDataTypeWrapper(ColumnDataType::Uint32).into()
);
assert_eq!(
ConcreteDataType::uint64_datatype(),
ColumnDataTypeWrapper(ColumnDataType::Uint64).into()
);
assert_eq!(
ConcreteDataType::float32_datatype(),
ColumnDataTypeWrapper(ColumnDataType::Float32).into()
);
assert_eq!(
ConcreteDataType::float64_datatype(),
ColumnDataTypeWrapper(ColumnDataType::Float64).into()
);
assert_eq!(
ConcreteDataType::binary_datatype(),
ColumnDataTypeWrapper(ColumnDataType::Binary).into()
);
assert_eq!(
ConcreteDataType::string_datatype(),
ColumnDataTypeWrapper(ColumnDataType::String).into()
);
assert_eq!(
ConcreteDataType::date_datatype(),
ColumnDataTypeWrapper(ColumnDataType::Date).into()
);
assert_eq!(
ConcreteDataType::datetime_datatype(),
ColumnDataTypeWrapper(ColumnDataType::Datetime).into()
);
assert_eq!(
ConcreteDataType::timestamp_millis_datatype(),
ColumnDataTypeWrapper(ColumnDataType::Timestamp).into()
);
}
#[test]
fn test_column_datatype_from_concrete_datatype() {
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::Boolean),
ConcreteDataType::boolean_datatype().try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::Int8),
ConcreteDataType::int8_datatype().try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::Int16),
ConcreteDataType::int16_datatype().try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::Int32),
ConcreteDataType::int32_datatype().try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::Int64),
ConcreteDataType::int64_datatype().try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::Uint8),
ConcreteDataType::uint8_datatype().try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::Uint16),
ConcreteDataType::uint16_datatype().try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::Uint32),
ConcreteDataType::uint32_datatype().try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::Uint64),
ConcreteDataType::uint64_datatype().try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::Float32),
ConcreteDataType::float32_datatype().try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::Float64),
ConcreteDataType::float64_datatype().try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::Binary),
ConcreteDataType::binary_datatype().try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::String),
ConcreteDataType::string_datatype().try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::Date),
ConcreteDataType::date_datatype().try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::Datetime),
ConcreteDataType::datetime_datatype().try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::Timestamp),
ConcreteDataType::timestamp_millis_datatype()
.try_into()
.unwrap()
);
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
assert!(result.is_err());
assert_eq!(
result.unwrap_err().to_string(),
"Failed to create column datatype from Null(NullType)"
);
let result: Result<ColumnDataTypeWrapper> =
ConcreteDataType::list_datatype(ConcreteDataType::boolean_datatype()).try_into();
assert!(result.is_err());
assert_eq!(
result.unwrap_err().to_string(),
"Failed to create column datatype from List(ListType { inner: Boolean(BooleanType) })"
);
}
}

View File

@@ -1,3 +1,6 @@
pub mod error;
pub mod helper;
pub mod prometheus;
pub mod serde;
pub mod v1;

View File

@@ -0,0 +1,5 @@
#![allow(clippy::derive_partial_eq_without_eq)]
pub mod remote {
tonic::include_proto!("prometheus");
}

View File

@@ -138,6 +138,7 @@ mod tests {
semantic_type: SEMANTIC_TAG,
values: Some(values),
null_mask,
..Default::default()
};
InsertBatch {
columns: vec![column],
@@ -156,6 +157,7 @@ mod tests {
semantic_type: SEMANTIC_TAG,
values: Some(values),
null_mask,
..Default::default()
};
SelectResult {
columns: vec![column],

View File

@@ -4,3 +4,5 @@ tonic::include_proto!("greptime.v1");
pub mod codec {
tonic::include_proto!("greptime.v1.codec");
}
pub mod meta;

54
src/api/src/v1/meta.rs Normal file
View File

@@ -0,0 +1,54 @@
tonic::include_proto!("greptime.v1.meta");
pub const PROTOCOL_VERSION: u64 = 1;
impl RequestHeader {
#[inline]
pub fn new((cluster_id, member_id): (u64, u64)) -> Self {
Self {
protocol_version: PROTOCOL_VERSION,
cluster_id,
member_id,
}
}
}
impl ResponseHeader {
#[inline]
pub fn success(cluster_id: u64) -> Self {
Self {
protocol_version: PROTOCOL_VERSION,
cluster_id,
..Default::default()
}
}
#[inline]
pub fn failed(cluster_id: u64, error: Error) -> Self {
Self {
protocol_version: PROTOCOL_VERSION,
cluster_id,
error: Some(error),
}
}
}
macro_rules! gen_set_header {
($req: ty) => {
impl $req {
#[inline]
pub fn set_header(&mut self, (cluster_id, member_id): (u64, u64)) {
self.header = Some(RequestHeader::new((cluster_id, member_id)));
}
}
};
}
gen_set_header!(HeartbeatRequest);
gen_set_header!(RouteRequest);
gen_set_header!(CreateRequest);
gen_set_header!(RangeRequest);
gen_set_header!(PutRequest);
gen_set_header!(BatchPutRequest);
gen_set_header!(CompareAndPutRequest);
gen_set_header!(DeleteRangeRequest);
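A hypothetical usage of the generated setter (not part of the diff above), assuming the prost-generated request types implement `Default` as prost messages normally do; the key below is made up for the example.
```Rust
fn example_set_header() {
    let (cluster_id, member_id) = (1u64, 42u64);

    let mut req = RangeRequest {
        key: b"__catalog".to_vec(),
        ..Default::default()
    };
    req.set_header((cluster_id, member_id));

    // The header now carries the protocol version plus the ids passed in.
    assert_eq!(req.header.unwrap().protocol_version, PROTOCOL_VERSION);
}
```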

View File

@@ -2,25 +2,44 @@
name = "catalog"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
async-trait = "0.1"
api = { path = "../api" }
arc-swap = "1.0"
async-stream = "0.3"
async-trait = "0.1"
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = ["simd"] }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = [
"simd",
] }
datatypes = { path = "../datatypes" }
futures = "0.3"
futures-util = "0.3"
lazy_static = "1.4"
meta-client = { path = "../meta-client" }
opendal = "0.17"
regex = "1.6"
serde = "1.0"
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
storage = { path = "../storage" }
table = { path = "../table" }
tokio = { version = "1.18", features = ["full"] }
[dev-dependencies]
chrono = "0.4"
log-store = { path = "../log-store" }
object-store = { path = "../object-store" }
opendal = "0.17"
storage = { path = "../storage" }
table-engine = { path = "../table-engine" }
tempdir = "0.3"
tokio = { version = "1.0", features = ["full"] }


@@ -1,6 +0,0 @@
pub const SYSTEM_CATALOG_NAME: &str = "system";
pub const INFORMATION_SCHEMA_NAME: &str = "information_schema";
pub const SYSTEM_CATALOG_TABLE_ID: u32 = 0;
pub const SYSTEM_CATALOG_TABLE_NAME: &str = "system_catalog";
pub const DEFAULT_CATALOG_NAME: &str = "greptime";
pub const DEFAULT_SCHEMA_NAME: &str = "public";


@@ -21,6 +21,17 @@ pub enum Error {
source: table::error::Error,
},
#[snafu(display(
"Failed to create table, table info: {}, source: {}",
table_info,
source
))]
CreateTable {
table_info: String,
#[snafu(backtrace)]
source: table::error::Error,
},
#[snafu(display("System catalog is not valid: {}", msg))]
SystemCatalog { msg: String, backtrace: Backtrace },
@@ -89,6 +100,41 @@ pub enum Error {
#[snafu(backtrace)]
source: table::error::Error,
},
#[snafu(display("Illegal catalog manager state: {}", msg))]
IllegalManagerState { backtrace: Backtrace, msg: String },
#[snafu(display("Failed to scan system catalog table, source: {}", source))]
SystemCatalogTableScan {
#[snafu(backtrace)]
source: table::error::Error,
},
#[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
SystemCatalogTableScanExec {
#[snafu(backtrace)]
source: common_query::error::Error,
},
#[snafu(display("Cannot parse catalog value, source: {}", source))]
InvalidCatalogValue {
#[snafu(backtrace)]
source: common_catalog::error::Error,
},
#[snafu(display("IO error occurred while fetching catalog info, source: {}", source))]
Io {
backtrace: Backtrace,
source: std::io::Error,
},
#[snafu(display("Local and remote catalog data are inconsistent, msg: {}", msg))]
CatalogStateInconsistent { msg: String, backtrace: Backtrace },
#[snafu(display("Failed to perform metasrv operation, source: {}", source))]
MetaSrv {
#[snafu(backtrace)]
source: meta_client::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -97,21 +143,33 @@ impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::InvalidKey { .. }
| Error::OpenSystemCatalog { .. }
| Error::CreateSystemCatalog { .. }
| Error::SchemaNotFound { .. }
| Error::TableNotFound { .. }
| Error::InvalidEntryType { .. } => StatusCode::Unexpected,
| Error::IllegalManagerState { .. }
| Error::CatalogNotFound { .. }
| Error::InvalidEntryType { .. }
| Error::CatalogStateInconsistent { .. } => StatusCode::Unexpected,
Error::SystemCatalog { .. }
| Error::SystemCatalogTypeMismatch { .. }
| Error::EmptyValue
| Error::ValueDeserialize { .. }
| Error::CatalogNotFound { .. }
| Error::OpenTable { .. }
| Error::ReadSystemCatalog { .. }
| Error::InsertTableRecord { .. } => StatusCode::StorageUnavailable,
| Error::Io { .. } => StatusCode::StorageUnavailable,
Error::ReadSystemCatalog { source, .. } => source.status_code(),
Error::SystemCatalogTypeMismatch { source, .. } => source.status_code(),
Error::InvalidCatalogValue { source, .. } => source.status_code(),
Error::RegisterTable { .. } => StatusCode::Internal,
Error::TableExists { .. } => StatusCode::TableAlreadyExists,
Error::OpenSystemCatalog { source, .. }
| Error::CreateSystemCatalog { source, .. }
| Error::InsertTableRecord { source, .. }
| Error::OpenTable { source, .. }
| Error::CreateTable { source, .. } => source.status_code(),
Error::MetaSrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source } => source.status_code(),
Error::SystemCatalogTableScanExec { source } => source.status_code(),
}
}
@@ -155,7 +213,7 @@ mod tests {
);
assert_eq!(
StatusCode::Unexpected,
StatusCode::StorageUnavailable,
Error::OpenSystemCatalog {
source: table::error::Error::new(MockError::new(StatusCode::StorageUnavailable))
}
@@ -163,7 +221,7 @@ mod tests {
);
assert_eq!(
StatusCode::Unexpected,
StatusCode::StorageUnavailable,
Error::CreateSystemCatalog {
source: table::error::Error::new(MockError::new(StatusCode::StorageUnavailable))
}
@@ -180,7 +238,7 @@ mod tests {
);
assert_eq!(
StatusCode::StorageUnavailable,
StatusCode::Internal,
Error::SystemCatalogTypeMismatch {
data_type: DataType::Boolean,
source: datatypes::error::Error::UnsupportedArrowType {


@@ -3,20 +3,22 @@
use std::any::Any;
use std::sync::Arc;
use common_telemetry::info;
use snafu::ResultExt;
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::requests::CreateTableRequest;
use table::TableRef;
pub use crate::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
pub use crate::manager::LocalCatalogManager;
use crate::error::{CreateTableSnafu, Result};
pub use crate::schema::{SchemaProvider, SchemaProviderRef};
mod consts;
pub mod error;
mod manager;
pub mod memory;
pub mod local;
pub mod remote;
pub mod schema;
mod system;
mod tables;
pub mod system;
pub mod tables;
/// Represents a list of named catalogs.
pub trait CatalogList: Sync + Send {
@@ -30,13 +32,13 @@ pub trait CatalogList: Sync + Send {
&self,
name: String,
catalog: CatalogProviderRef,
) -> Option<CatalogProviderRef>;
) -> Result<Option<CatalogProviderRef>>;
/// Retrieves the list of available catalog names
fn catalog_names(&self) -> Vec<String>;
fn catalog_names(&self) -> Result<Vec<String>>;
/// Retrieves a specific catalog by name, provided it exists.
fn catalog(&self, name: &str) -> Option<CatalogProviderRef>;
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>>;
}
/// Represents a catalog, comprising a number of named schemas.
@@ -46,14 +48,17 @@ pub trait CatalogProvider: Sync + Send {
fn as_any(&self) -> &dyn Any;
/// Retrieves the list of available schema names in this catalog.
fn schema_names(&self) -> Vec<String>;
fn schema_names(&self) -> Result<Vec<String>>;
/// Registers schema to this catalog.
fn register_schema(&self, name: String, schema: SchemaProviderRef)
-> Option<SchemaProviderRef>;
fn register_schema(
&self,
name: String,
schema: SchemaProviderRef,
) -> Result<Option<SchemaProviderRef>>;
/// Retrieves a specific schema from the catalog by name, provided it exists.
fn schema(&self, name: &str) -> Option<SchemaProviderRef>;
fn schema(&self, name: &str) -> Result<Option<SchemaProviderRef>>;
}
pub type CatalogListRef = Arc<dyn CatalogList>;
@@ -70,13 +75,38 @@ pub trait CatalogManager: CatalogList {
/// Registers a table under the given catalog/schema to the catalog manager,
/// returns the number of tables registered.
async fn register_table(&self, request: RegisterTableRequest) -> error::Result<usize>;
/// Registers a system table; this must be called before starting the manager.
async fn register_system_table(&self, request: RegisterSystemTableRequest)
-> error::Result<()>;
/// Returns the table by catalog, schema and table name.
fn table(
&self,
catalog: &str,
schema: &str,
table_name: &str,
) -> error::Result<Option<TableRef>>;
}
pub type CatalogManagerRef = Arc<dyn CatalogManager>;
/// Hook called after system table opening.
pub type OpenSystemTableHook = Arc<dyn Fn(TableRef) -> error::Result<()> + Send + Sync>;
/// Register system table request:
/// - When the system table is already created and registered, the hook will be called
///   with the table ref after opening the system table.
/// - When the system table does not exist, create and register it via `create_table_request`,
///   then call `open_hook` with the created table.
pub struct RegisterSystemTableRequest {
pub create_table_request: CreateTableRequest,
pub open_hook: Option<OpenSystemTableHook>,
}
#[derive(Clone)]
pub struct RegisterTableRequest {
pub catalog: Option<String>,
pub schema: Option<String>,
pub catalog: String,
pub schema: String,
pub table_name: String,
pub table_id: TableId,
pub table: TableRef,
@@ -86,3 +116,53 @@ pub struct RegisterTableRequest {
pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
format!("{}.{}.{}", catalog, schema, table)
}
pub trait CatalogProviderFactory {
fn create(&self, catalog_name: String) -> CatalogProviderRef;
}
pub trait SchemaProviderFactory {
fn create(&self, catalog_name: String, schema_name: String) -> SchemaProviderRef;
}
pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
manager: &'a M,
engine: TableEngineRef,
sys_table_requests: &'a mut Vec<RegisterSystemTableRequest>,
) -> Result<()> {
for req in sys_table_requests.drain(..) {
let catalog_name = &req.create_table_request.catalog_name;
let schema_name = &req.create_table_request.schema_name;
let table_name = &req.create_table_request.table_name;
let table_id = req.create_table_request.id;
let table = if let Some(table) = manager.table(catalog_name, schema_name, table_name)? {
table
} else {
let table = engine
.create_table(&EngineContext::default(), req.create_table_request.clone())
.await
.with_context(|_| CreateTableSnafu {
table_info: format!(
"{}.{}.{}, id: {}",
catalog_name, schema_name, table_name, table_id,
),
})?;
manager
.register_table(RegisterTableRequest {
catalog: catalog_name.clone(),
schema: schema_name.clone(),
table_name: table_name.clone(),
table_id,
table: table.clone(),
})
.await?;
info!("Created and registered system table: {}", table_name);
table
};
if let Some(hook) = req.open_hook {
(hook)(table)?;
}
}
Ok(())
}
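For context, a caller queues system tables before starting the manager; `handle_system_table_request` then drains the queue on `start`. A minimal sketch (hypothetical function name; the `CreateTableRequest` is assumed to be built by the caller):

use std::sync::Arc;

use catalog::error::Result;
use catalog::{CatalogManager, OpenSystemTableHook, RegisterSystemTableRequest};
use table::requests::CreateTableRequest;
use table::TableRef;

async fn queue_system_table(
    manager: &impl CatalogManager,
    create_table_request: CreateTableRequest,
) -> Result<()> {
    // The hook runs once `handle_system_table_request` has created or opened the table.
    let hook: OpenSystemTableHook = Arc::new(|table: TableRef| {
        common_telemetry::info!("system table ready: {}", table.table_info().name);
        Ok(())
    });
    manager
        .register_system_table(RegisterSystemTableRequest {
            create_table_request,
            open_hook: Some(hook),
        })
        .await
    // A later `manager.start().await` creates/opens the table and invokes the hook.
}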

src/catalog/src/local.rs Normal file

@@ -0,0 +1,7 @@
pub mod manager;
pub mod memory;
pub use manager::LocalCatalogManager;
pub use memory::{
new_memory_catalog_list, MemoryCatalogList, MemoryCatalogProvider, MemorySchemaProvider,
};


@@ -2,6 +2,10 @@ use std::any::Any;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_NAME,
};
use common_recordbatch::RecordBatch;
use common_telemetry::{debug, info};
use datatypes::prelude::ScalarVector;
@@ -13,22 +17,24 @@ use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::requests::OpenTableRequest;
use table::table::numbers::NumbersTable;
use table::TableRef;
use super::error::Result;
use crate::consts::{INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_NAME};
use crate::error::Result;
use crate::error::{
CatalogNotFoundSnafu, OpenTableSnafu, ReadSystemCatalogSnafu, SchemaNotFoundSnafu,
SystemCatalogSnafu, SystemCatalogTypeMismatchSnafu, TableExistsSnafu, TableNotFoundSnafu,
CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu,
SchemaNotFoundSnafu, SystemCatalogSnafu, SystemCatalogTypeMismatchSnafu, TableExistsSnafu,
TableNotFoundSnafu,
};
use crate::memory::{MemoryCatalogList, MemoryCatalogProvider, MemorySchemaProvider};
use crate::local::memory::{MemoryCatalogList, MemoryCatalogProvider, MemorySchemaProvider};
use crate::system::{
decode_system_catalog, Entry, SystemCatalogTable, TableEntry, ENTRY_TYPE_INDEX, KEY_INDEX,
VALUE_INDEX,
};
use crate::tables::SystemCatalog;
use crate::{
format_full_table_name, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
RegisterTableRequest, SchemaProvider, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME,
format_full_table_name, handle_system_table_request, CatalogList, CatalogManager,
CatalogProvider, CatalogProviderRef, RegisterSystemTableRequest, RegisterTableRequest,
SchemaProvider,
};
/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
@@ -37,14 +43,15 @@ pub struct LocalCatalogManager {
catalogs: Arc<MemoryCatalogList>,
engine: TableEngineRef,
next_table_id: AtomicU32,
lock: Mutex<()>,
init_lock: Mutex<bool>,
system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
}
impl LocalCatalogManager {
/// Create a new [CatalogManager] with given user catalogs and table engine
pub async fn try_new(engine: TableEngineRef) -> Result<Self> {
let table = SystemCatalogTable::new(engine.clone()).await?;
let memory_catalog_list = crate::memory::new_memory_catalog_list()?;
let memory_catalog_list = crate::local::memory::new_memory_catalog_list()?;
let system_catalog = Arc::new(SystemCatalog::new(
table,
memory_catalog_list.clone(),
@@ -54,8 +61,9 @@ impl LocalCatalogManager {
system: system_catalog,
catalogs: memory_catalog_list,
engine,
next_table_id: AtomicU32::new(0),
lock: Mutex::new(()),
next_table_id: AtomicU32::new(MIN_USER_TABLE_ID),
init_lock: Mutex::new(false),
system_table_requests: Mutex::new(Vec::default()),
})
}
@@ -78,7 +86,12 @@ impl LocalCatalogManager {
max_table_id
);
self.next_table_id
.store(max_table_id + 1, Ordering::Relaxed);
.store((max_table_id + 1).max(MIN_USER_TABLE_ID), Ordering::Relaxed);
*self.init_lock.lock().await = true;
// Processing system table hooks
let mut sys_table_requests = self.system_table_requests.lock().await;
handle_system_table_request(self, self.engine.clone(), &mut sys_table_requests).await?;
Ok(())
}
@@ -89,9 +102,9 @@ impl LocalCatalogManager {
self.system.information_schema.system.clone(),
)?;
let system_catalog = Arc::new(MemoryCatalogProvider::new());
system_catalog.register_schema(INFORMATION_SCHEMA_NAME.to_string(), system_schema);
system_catalog.register_schema(INFORMATION_SCHEMA_NAME.to_string(), system_schema)?;
self.catalogs
.register_catalog(SYSTEM_CATALOG_NAME.to_string(), system_catalog);
.register_catalog(SYSTEM_CATALOG_NAME.to_string(), system_catalog)?;
let default_catalog = Arc::new(MemoryCatalogProvider::new());
let default_schema = Arc::new(MemorySchemaProvider::new());
@@ -101,9 +114,9 @@ impl LocalCatalogManager {
let table = Arc::new(NumbersTable::default());
default_schema.register_table("numbers".to_string(), table)?;
default_catalog.register_schema(DEFAULT_SCHEMA_NAME.to_string(), default_schema);
default_catalog.register_schema(DEFAULT_SCHEMA_NAME.to_string(), default_schema)?;
self.catalogs
.register_catalog(DEFAULT_CATALOG_NAME.to_string(), default_catalog);
.register_catalog(DEFAULT_CATALOG_NAME.to_string(), default_catalog)?;
Ok(())
}
@@ -159,14 +172,14 @@ impl LocalCatalogManager {
Entry::Schema(s) => {
let catalog =
self.catalogs
.catalog(&s.catalog_name)
.catalog(&s.catalog_name)?
.context(CatalogNotFoundSnafu {
catalog_name: &s.catalog_name,
})?;
catalog.register_schema(
s.schema_name.clone(),
Arc::new(MemorySchemaProvider::new()),
);
)?;
info!("Registered schema: {:?}", s);
}
Entry::Table(t) => {
@@ -183,12 +196,12 @@ impl LocalCatalogManager {
async fn open_and_register_table(&self, t: &TableEntry) -> Result<()> {
let catalog = self
.catalogs
.catalog(&t.catalog_name)
.catalog(&t.catalog_name)?
.context(CatalogNotFoundSnafu {
catalog_name: &t.catalog_name,
})?;
let schema = catalog
.schema(&t.schema_name)
.schema(&t.schema_name)?
.context(SchemaNotFoundSnafu {
schema_info: format!("{}.{}", &t.catalog_name, &t.schema_name),
})?;
@@ -232,19 +245,19 @@ impl CatalogList for LocalCatalogManager {
&self,
name: String,
catalog: CatalogProviderRef,
) -> Option<Arc<dyn CatalogProvider>> {
) -> Result<Option<CatalogProviderRef>> {
self.catalogs.register_catalog(name, catalog)
}
fn catalog_names(&self) -> Vec<String> {
let mut res = self.catalogs.catalog_names();
fn catalog_names(&self) -> Result<Vec<String>> {
let mut res = self.catalogs.catalog_names()?;
res.push(SYSTEM_CATALOG_NAME.to_string());
res
Ok(res)
}
fn catalog(&self, name: &str) -> Option<Arc<dyn CatalogProvider>> {
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>> {
if name.eq_ignore_ascii_case(SYSTEM_CATALOG_NAME) {
Some(self.system.clone())
Ok(Some(self.system.clone()))
} else {
self.catalogs.catalog(name)
}
@@ -253,7 +266,7 @@ impl CatalogList for LocalCatalogManager {
#[async_trait::async_trait]
impl CatalogManager for LocalCatalogManager {
/// Start [MemoryCatalogManager] to load all information from system catalog table.
/// Start [LocalCatalogManager] to load all information from system catalog table.
/// Make sure the table engine is initialized before starting [LocalCatalogManager].
async fn start(&self) -> Result<()> {
self.init().await
@@ -265,37 +278,39 @@ impl CatalogManager for LocalCatalogManager {
}
async fn register_table(&self, request: RegisterTableRequest) -> Result<usize> {
let _lock = self.lock.lock().await;
let catalog_name = request
.catalog
.unwrap_or_else(|| DEFAULT_CATALOG_NAME.to_string());
let schema_name = request
.schema
.unwrap_or_else(|| DEFAULT_SCHEMA_NAME.to_string());
let started = self.init_lock.lock().await;
ensure!(
*started,
IllegalManagerStateSnafu {
msg: "Catalog manager not started",
}
);
let catalog_name = &request.catalog;
let schema_name = &request.schema;
let catalog = self
.catalogs
.catalog(&catalog_name)
.context(CatalogNotFoundSnafu {
catalog_name: &catalog_name,
})?;
.catalog(catalog_name)?
.context(CatalogNotFoundSnafu { catalog_name })?;
let schema = catalog
.schema(&schema_name)
.schema(schema_name)?
.with_context(|| SchemaNotFoundSnafu {
schema_info: format!("{}.{}", catalog_name, schema_name),
})?;
if schema.table_exist(&request.table_name) {
if schema.table_exist(&request.table_name)? {
return TableExistsSnafu {
table: format_full_table_name(&catalog_name, &schema_name, &request.table_name),
table: format_full_table_name(catalog_name, schema_name, &request.table_name),
}
.fail();
}
self.system
.register_table(
catalog_name,
schema_name,
catalog_name.clone(),
schema_name.clone(),
request.table_name.clone(),
request.table_id,
)
@@ -304,4 +319,36 @@ impl CatalogManager for LocalCatalogManager {
schema.register_table(request.table_name, request.table)?;
Ok(1)
}
async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
ensure!(
!*self.init_lock.lock().await,
IllegalManagerStateSnafu {
msg: "Catalog manager already started",
}
);
let mut sys_table_requests = self.system_table_requests.lock().await;
sys_table_requests.push(request);
Ok(())
}
fn table(
&self,
catalog_name: &str,
schema_name: &str,
table_name: &str,
) -> Result<Option<TableRef>> {
let catalog = self
.catalogs
.catalog(catalog_name)?
.context(CatalogNotFoundSnafu { catalog_name })?;
let schema = catalog
.schema(schema_name)?
.with_context(|| SchemaNotFoundSnafu {
schema_info: format!("{}.{}", catalog_name, schema_name),
})?;
schema.table(table_name)
}
}

View File

@@ -23,7 +23,7 @@ impl MemoryCatalogList {
pub fn register_catalog_if_absent(
&self,
name: String,
catalog: Arc<dyn CatalogProvider>,
catalog: CatalogProviderRef,
) -> Option<CatalogProviderRef> {
let mut catalogs = self.catalogs.write().unwrap();
let entry = catalogs.entry(name);
@@ -46,19 +46,19 @@ impl CatalogList for MemoryCatalogList {
&self,
name: String,
catalog: CatalogProviderRef,
) -> Option<CatalogProviderRef> {
) -> Result<Option<CatalogProviderRef>> {
let mut catalogs = self.catalogs.write().unwrap();
catalogs.insert(name, catalog)
Ok(catalogs.insert(name, catalog))
}
fn catalog_names(&self) -> Vec<String> {
fn catalog_names(&self) -> Result<Vec<String>> {
let catalogs = self.catalogs.read().unwrap();
catalogs.keys().map(|s| s.to_string()).collect()
Ok(catalogs.keys().map(|s| s.to_string()).collect())
}
fn catalog(&self, name: &str) -> Option<CatalogProviderRef> {
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>> {
let catalogs = self.catalogs.read().unwrap();
catalogs.get(name).cloned()
Ok(catalogs.get(name).cloned())
}
}
@@ -87,23 +87,23 @@ impl CatalogProvider for MemoryCatalogProvider {
self
}
fn schema_names(&self) -> Vec<String> {
fn schema_names(&self) -> Result<Vec<String>> {
let schemas = self.schemas.read().unwrap();
schemas.keys().cloned().collect()
Ok(schemas.keys().cloned().collect())
}
fn register_schema(
&self,
name: String,
schema: SchemaProviderRef,
) -> Option<SchemaProviderRef> {
) -> Result<Option<SchemaProviderRef>> {
let mut schemas = self.schemas.write().unwrap();
schemas.insert(name, schema)
Ok(schemas.insert(name, schema))
}
fn schema(&self, name: &str) -> Option<Arc<dyn SchemaProvider>> {
fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>> {
let schemas = self.schemas.read().unwrap();
schemas.get(name).cloned()
Ok(schemas.get(name).cloned())
}
}
@@ -132,18 +132,18 @@ impl SchemaProvider for MemorySchemaProvider {
self
}
fn table_names(&self) -> Vec<String> {
fn table_names(&self) -> Result<Vec<String>> {
let tables = self.tables.read().unwrap();
tables.keys().cloned().collect()
Ok(tables.keys().cloned().collect())
}
fn table(&self, name: &str) -> Option<TableRef> {
fn table(&self, name: &str) -> Result<Option<TableRef>> {
let tables = self.tables.read().unwrap();
tables.get(name).cloned()
Ok(tables.get(name).cloned())
}
fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
if self.table_exist(name.as_str()) {
if self.table_exist(name.as_str())? {
return TableExistsSnafu { table: name }.fail()?;
}
let mut tables = self.tables.write().unwrap();
@@ -155,9 +155,9 @@ impl SchemaProvider for MemorySchemaProvider {
Ok(tables.remove(name))
}
fn table_exist(&self, name: &str) -> bool {
fn table_exist(&self, name: &str) -> Result<bool> {
let tables = self.tables.read().unwrap();
tables.contains_key(name)
Ok(tables.contains_key(name))
}
}
@@ -168,40 +168,50 @@ pub fn new_memory_catalog_list() -> Result<Arc<MemoryCatalogList>> {
#[cfg(test)]
mod tests {
use common_catalog::consts::*;
use common_error::ext::ErrorExt;
use common_error::prelude::StatusCode;
use table::table::numbers::NumbersTable;
use super::*;
use crate::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
#[test]
fn test_new_memory_catalog_list() {
let catalog_list = new_memory_catalog_list().unwrap();
assert!(catalog_list.catalog(DEFAULT_CATALOG_NAME).is_none());
assert!(catalog_list
.catalog(DEFAULT_CATALOG_NAME)
.unwrap()
.is_none());
let default_catalog = Arc::new(MemoryCatalogProvider::default());
catalog_list.register_catalog(DEFAULT_CATALOG_NAME.to_string(), default_catalog.clone());
catalog_list
.register_catalog(DEFAULT_CATALOG_NAME.to_string(), default_catalog.clone())
.unwrap();
assert!(default_catalog.schema(DEFAULT_SCHEMA_NAME).is_none());
assert!(default_catalog
.schema(DEFAULT_SCHEMA_NAME)
.unwrap()
.is_none());
let default_schema = Arc::new(MemorySchemaProvider::default());
default_catalog.register_schema(DEFAULT_SCHEMA_NAME.to_string(), default_schema.clone());
default_catalog
.register_schema(DEFAULT_SCHEMA_NAME.to_string(), default_schema.clone())
.unwrap();
default_schema
.register_table("numbers".to_string(), Arc::new(NumbersTable::default()))
.unwrap();
let table = default_schema.table("numbers");
let table = default_schema.table("numbers").unwrap();
assert!(table.is_some());
assert!(default_schema.table("not_exists").is_none());
assert!(default_schema.table("not_exists").unwrap().is_none());
}
#[tokio::test]
async fn test_mem_provider() {
let provider = MemorySchemaProvider::new();
let table_name = "numbers";
assert!(!provider.table_exist(table_name));
assert!(!provider.table_exist(table_name).unwrap());
assert!(provider.deregister_table(table_name).unwrap().is_none());
let test_table = NumbersTable::default();
// register table successfully
@@ -209,7 +219,7 @@ mod tests {
.register_table(table_name.to_string(), Arc::new(test_table))
.unwrap()
.is_none());
assert!(provider.table_exist(table_name));
assert!(provider.table_exist(table_name).unwrap());
let other_table = NumbersTable::default();
let result = provider.register_table(table_name.to_string(), Arc::new(other_table));
let err = result.err().unwrap();

src/catalog/src/remote.rs Normal file

@@ -0,0 +1,94 @@
use std::fmt::Debug;
use std::pin::Pin;
use std::sync::Arc;
pub use client::MetaKvBackend;
use futures::Stream;
use futures_util::StreamExt;
pub use manager::{RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider};
use crate::error::Error;
mod client;
mod manager;
#[derive(Debug, Clone)]
pub struct Kv(pub Vec<u8>, pub Vec<u8>);
pub type ValueIter<'a, E> = Pin<Box<dyn Stream<Item = Result<Kv, E>> + Send + 'a>>;
#[async_trait::async_trait]
pub trait KvBackend: Send + Sync {
fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, crate::error::Error>
where
'a: 'b;
async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), crate::error::Error>;
async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), crate::error::Error>;
async fn delete(&self, key: &[u8]) -> Result<(), Error> {
self.delete_range(key, &[]).await
}
/// The default `get` implementation is built on top of the `range` method.
async fn get(&self, key: &[u8]) -> Result<Option<Kv>, Error> {
let mut iter = self.range(key);
while let Some(r) = iter.next().await {
let kv = r?;
if kv.0 == key {
return Ok(Some(kv));
}
}
return Ok(None);
}
}
pub type KvBackendRef = Arc<dyn KvBackend>;
#[cfg(test)]
mod tests {
use async_stream::stream;
use super::*;
struct MockKvBackend {}
#[async_trait::async_trait]
impl KvBackend for MockKvBackend {
fn range<'a, 'b>(&'a self, _key: &[u8]) -> ValueIter<'b, Error>
where
'a: 'b,
{
Box::pin(stream!({
for i in 0..3 {
yield Ok(Kv(
i.to_string().as_bytes().to_vec(),
i.to_string().as_bytes().to_vec(),
))
}
}))
}
async fn set(&self, _key: &[u8], _val: &[u8]) -> Result<(), Error> {
unimplemented!()
}
async fn delete_range(&self, _key: &[u8], _end: &[u8]) -> Result<(), Error> {
unimplemented!()
}
}
#[tokio::test]
async fn test_get() {
let backend = MockKvBackend {};
let result = backend.get(0.to_string().as_bytes()).await;
assert_eq!(0.to_string().as_bytes(), result.unwrap().unwrap().0);
let result = backend.get(1.to_string().as_bytes()).await;
assert_eq!(1.to_string().as_bytes(), result.unwrap().unwrap().0);
let result = backend.get(2.to_string().as_bytes()).await;
assert_eq!(2.to_string().as_bytes(), result.unwrap().unwrap().0);
let result = backend.get(3.to_string().as_bytes()).await;
assert!(result.unwrap().is_none());
}
}


@@ -0,0 +1,71 @@
use std::fmt::Debug;
use async_stream::stream;
use common_telemetry::info;
use meta_client::client::MetaClient;
use meta_client::rpc::{DeleteRangeRequest, PutRequest, RangeRequest};
use snafu::ResultExt;
use crate::error::{Error, MetaSrvSnafu};
use crate::remote::{Kv, KvBackend, ValueIter};
#[derive(Debug)]
pub struct MetaKvBackend {
pub client: MetaClient,
}
/// Implement the `KvBackend` trait for `MetaKvBackend` instead of opendal's `Accessor`, since
/// `MetaClient`'s range method can return both keys and values, which reduces IO overhead
/// compared to `Accessor`'s list and get methods.
#[async_trait::async_trait]
impl KvBackend for MetaKvBackend {
fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
where
'a: 'b,
{
let key = key.to_vec();
Box::pin(stream!({
let mut resp = self
.client
.range(RangeRequest::new().with_prefix(key))
.await
.context(MetaSrvSnafu)?;
let kvs = resp.take_kvs();
for mut kv in kvs.into_iter() {
yield Ok(Kv(kv.take_key(), kv.take_value()))
}
}))
}
async fn get(&self, key: &[u8]) -> Result<Option<Kv>, Error> {
let mut response = self
.client
.range(RangeRequest::new().with_key(key))
.await
.context(MetaSrvSnafu)?;
Ok(response
.take_kvs()
.get_mut(0)
.map(|kv| Kv(kv.take_key(), kv.take_value())))
}
async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Error> {
let req = PutRequest::new()
.with_key(key.to_vec())
.with_value(val.to_vec());
let _ = self.client.put(req).await.context(MetaSrvSnafu)?;
Ok(())
}
async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Error> {
let req = DeleteRangeRequest::new().with_range(key.to_vec(), end.to_vec());
let resp = self.client.delete_range(req).await.context(MetaSrvSnafu)?;
info!(
"Delete range, key: {}, end: {}, deleted: {}",
String::from_utf8_lossy(key),
String::from_utf8_lossy(end),
resp.deleted()
);
Ok(())
}
}
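A rough usage sketch of the `KvBackend` interface (hypothetical key/value bytes; constructing the underlying `MetaClient` is outside this diff, so the backend is taken as a parameter):

use catalog::error::Error;
use catalog::remote::{Kv, KvBackendRef};

async fn kv_roundtrip(backend: KvBackendRef) -> Result<(), Error> {
    // Write a key, then read it back; for `MetaKvBackend`, `get` issues a single range request.
    backend.set(b"example_key", b"example_value").await?;
    if let Some(Kv(key, value)) = backend.get(b"example_key").await? {
        assert_eq!(b"example_key".to_vec(), key);
        assert_eq!(b"example_value".to_vec(), value);
    }
    // `delete` is a thin wrapper over `delete_range` with an empty end key.
    backend.delete(b"example_key").await?;
    Ok(())
}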


@@ -0,0 +1,669 @@
use std::any::Any;
use std::collections::HashMap;
use std::pin::Pin;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use arc_swap::ArcSwap;
use async_stream::stream;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use common_catalog::{
build_catalog_prefix, build_schema_prefix, build_table_prefix, CatalogKey, CatalogValue,
SchemaKey, SchemaValue, TableKey, TableValue,
};
use common_telemetry::{debug, info};
use datatypes::schema::Schema;
use futures::Stream;
use futures_util::StreamExt;
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::{TableId, TableVersion};
use table::requests::{CreateTableRequest, OpenTableRequest};
use table::TableRef;
use tokio::sync::Mutex;
use crate::error::Result;
use crate::error::{
CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, OpenTableSnafu,
SchemaNotFoundSnafu, TableExistsSnafu,
};
use crate::remote::{Kv, KvBackendRef};
use crate::{
handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
RegisterSystemTableRequest, RegisterTableRequest, SchemaProvider, SchemaProviderRef,
};
/// Catalog manager based on metasrv.
pub struct RemoteCatalogManager {
node_id: u64,
backend: KvBackendRef,
catalogs: Arc<ArcSwap<HashMap<String, CatalogProviderRef>>>,
next_table_id: Arc<AtomicU32>,
engine: TableEngineRef,
system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
mutex: Arc<Mutex<()>>,
}
impl RemoteCatalogManager {
pub fn new(engine: TableEngineRef, node_id: u64, backend: KvBackendRef) -> Self {
Self {
engine,
node_id,
backend,
catalogs: Default::default(),
next_table_id: Default::default(),
system_table_requests: Default::default(),
mutex: Default::default(),
}
}
fn build_catalog_key(&self, catalog_name: impl AsRef<str>) -> CatalogKey {
CatalogKey {
catalog_name: catalog_name.as_ref().to_string(),
node_id: self.node_id,
}
}
fn new_catalog_provider(&self, catalog_name: &str) -> CatalogProviderRef {
Arc::new(RemoteCatalogProvider {
catalog_name: catalog_name.to_string(),
node_id: self.node_id,
backend: self.backend.clone(),
schemas: Default::default(),
mutex: Default::default(),
}) as _
}
fn new_schema_provider(&self, catalog_name: &str, schema_name: &str) -> SchemaProviderRef {
Arc::new(RemoteSchemaProvider {
catalog_name: catalog_name.to_string(),
schema_name: schema_name.to_string(),
tables: Default::default(),
node_id: self.node_id,
backend: self.backend.clone(),
mutex: Default::default(),
}) as _
}
async fn iter_remote_catalogs(
&self,
) -> Pin<Box<dyn Stream<Item = Result<CatalogKey>> + Send + '_>> {
let catalog_range_prefix = build_catalog_prefix();
info!("catalog_range_prefix: {}", catalog_range_prefix);
let mut catalogs = self.backend.range(catalog_range_prefix.as_bytes());
Box::pin(stream!({
while let Some(r) = catalogs.next().await {
let Kv(k, _) = r?;
if !k.starts_with(catalog_range_prefix.as_bytes()) {
debug!("Ignoring non-catalog key: {}", String::from_utf8_lossy(&k));
continue;
}
let key = CatalogKey::parse(&String::from_utf8_lossy(&k))
.context(InvalidCatalogValueSnafu)?;
if key.node_id == self.node_id {
yield Ok(key)
}
}
}))
}
async fn iter_remote_schemas(
&self,
catalog_name: &str,
) -> Pin<Box<dyn Stream<Item = Result<SchemaKey>> + Send + '_>> {
let schema_prefix = build_schema_prefix(catalog_name);
let mut schemas = self.backend.range(schema_prefix.as_bytes());
Box::pin(stream!({
while let Some(r) = schemas.next().await {
let Kv(k, _) = r?;
if !k.starts_with(schema_prefix.as_bytes()) {
debug!("Ignoring non-schema key: {}", String::from_utf8_lossy(&k));
continue;
}
let schema_key = SchemaKey::parse(&String::from_utf8_lossy(&k))
.context(InvalidCatalogValueSnafu)?;
if schema_key.node_id == self.node_id {
yield Ok(schema_key)
}
}
}))
}
/// Iterates over all table entries on metasrv.
/// TODO(hl): table entries with different versions are not currently considered.
/// Ideally, deprecated table entries should be deleted when deregistering them from the catalog.
async fn iter_remote_tables(
&self,
catalog_name: &str,
schema_name: &str,
) -> Pin<Box<dyn Stream<Item = Result<(TableKey, TableValue)>> + Send + '_>> {
let table_prefix = build_table_prefix(catalog_name, schema_name);
let mut tables = self.backend.range(table_prefix.as_bytes());
Box::pin(stream!({
while let Some(r) = tables.next().await {
let Kv(k, v) = r?;
if !k.starts_with(table_prefix.as_bytes()) {
debug!("Ignoring non-table prefix: {}", String::from_utf8_lossy(&k));
continue;
}
let table_key = TableKey::parse(&String::from_utf8_lossy(&k))
.context(InvalidCatalogValueSnafu)?;
let table_value = TableValue::parse(&String::from_utf8_lossy(&v))
.context(InvalidCatalogValueSnafu)?;
if table_value.node_id == self.node_id {
yield Ok((table_key, table_value))
}
}
}))
}
/// Fetches catalogs/schemas/tables from the remote backend, along with the max table id allocated.
async fn initiate_catalogs(&self) -> Result<(HashMap<String, CatalogProviderRef>, TableId)> {
let mut res = HashMap::new();
let max_table_id = MIN_USER_TABLE_ID;
// initiate default catalog and schema
let default_catalog = self.initiate_default_catalog().await?;
res.insert(DEFAULT_CATALOG_NAME.to_string(), default_catalog);
info!("Default catalog and schema registered");
let mut catalogs = self.iter_remote_catalogs().await;
while let Some(r) = catalogs.next().await {
let CatalogKey { catalog_name, .. } = r?;
info!("Fetch catalog from metasrv: {}", catalog_name);
let catalog = res
.entry(catalog_name.clone())
.or_insert_with(|| self.new_catalog_provider(&catalog_name))
.clone();
self.initiate_schemas(catalog_name, catalog, max_table_id)
.await?;
}
Ok((res, max_table_id))
}
async fn initiate_schemas(
&self,
catalog_name: String,
catalog: CatalogProviderRef,
max_table_id: TableId,
) -> Result<()> {
let mut schemas = self.iter_remote_schemas(&catalog_name).await;
while let Some(r) = schemas.next().await {
let SchemaKey {
catalog_name,
schema_name,
..
} = r?;
info!("Found schema: {}.{}", catalog_name, schema_name);
let schema = match catalog.schema(&schema_name)? {
None => {
let schema = self.new_schema_provider(&catalog_name, &schema_name);
catalog.register_schema(schema_name.clone(), schema.clone())?;
info!("Registered schema: {}", &schema_name);
schema
}
Some(schema) => schema,
};
info!(
"Fetch schema from metasrv: {}.{}",
&catalog_name, &schema_name
);
self.initiate_tables(&catalog_name, &schema_name, schema, max_table_id)
.await?;
}
Ok(())
}
/// Initializes all tables inside a schema by fetching data from metasrv.
async fn initiate_tables<'a>(
&'a self,
catalog_name: &'a str,
schema_name: &'a str,
schema: SchemaProviderRef,
mut max_table_id: TableId,
) -> Result<()> {
let mut tables = self.iter_remote_tables(catalog_name, schema_name).await;
while let Some(r) = tables.next().await {
let (table_key, table_value) = r?;
let table_ref = self.open_or_create_table(&table_key, &table_value).await?;
schema.register_table(table_key.table_name.to_string(), table_ref)?;
info!("Registered table {}", &table_key.table_name);
if table_value.id > max_table_id {
info!("Max table id: {} -> {}", max_table_id, table_value.id);
max_table_id = table_value.id;
}
}
Ok(())
}
async fn initiate_default_catalog(&self) -> Result<CatalogProviderRef> {
let default_catalog = self.new_catalog_provider(DEFAULT_CATALOG_NAME);
let default_schema = self.new_schema_provider(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME);
default_catalog.register_schema(DEFAULT_SCHEMA_NAME.to_string(), default_schema)?;
let schema_key = SchemaKey {
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
node_id: self.node_id,
}
.to_string();
self.backend
.set(
schema_key.as_bytes(),
&SchemaValue {}
.to_bytes()
.context(InvalidCatalogValueSnafu)?,
)
.await?;
info!("Registered default schema");
let catalog_key = CatalogKey {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
node_id: self.node_id,
}
.to_string();
self.backend
.set(
catalog_key.as_bytes(),
&CatalogValue {}
.to_bytes()
.context(InvalidCatalogValueSnafu)?,
)
.await?;
info!("Registered default catalog");
Ok(default_catalog)
}
async fn open_or_create_table(
&self,
table_key: &TableKey,
table_value: &TableValue,
) -> Result<TableRef> {
let context = EngineContext {};
let TableKey {
catalog_name,
schema_name,
table_name,
..
} = table_key;
let TableValue { id, meta, .. } = table_value;
let request = OpenTableRequest {
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
table_id: *id,
};
match self
.engine
.open_table(&context, request)
.await
.with_context(|_| OpenTableSnafu {
table_info: format!("{}.{}.{}, id:{}", catalog_name, schema_name, table_name, id,),
})? {
Some(table) => Ok(table),
None => {
let req = CreateTableRequest {
id: *id,
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
schema: Arc::new(Schema::new(meta.schema.column_schemas.clone())),
primary_key_indices: meta.primary_key_indices.clone(),
create_if_not_exists: true,
table_options: meta.options.clone(),
};
self.engine
.create_table(&context, req)
.await
.context(CreateTableSnafu {
table_info: format!(
"{}.{}.{}, id:{}",
&catalog_name, &schema_name, &table_name, id
),
})
}
}
}
}
#[async_trait::async_trait]
impl CatalogManager for RemoteCatalogManager {
async fn start(&self) -> Result<()> {
let (catalogs, max_table_id) = self.initiate_catalogs().await?;
info!(
"Initialized catalogs: {:?}",
catalogs.keys().cloned().collect::<Vec<_>>()
);
self.catalogs.store(Arc::new(catalogs));
self.next_table_id
.store(max_table_id + 1, Ordering::Relaxed);
info!("Max table id allocated: {}", max_table_id);
let mut system_table_requests = self.system_table_requests.lock().await;
handle_system_table_request(self, self.engine.clone(), &mut system_table_requests).await?;
info!("All system table opened");
Ok(())
}
fn next_table_id(&self) -> TableId {
self.next_table_id.fetch_add(1, Ordering::Relaxed)
}
async fn register_table(&self, request: RegisterTableRequest) -> Result<usize> {
let catalog_name = request.catalog;
let schema_name = request.schema;
let catalog_provider = self.catalog(&catalog_name)?.context(CatalogNotFoundSnafu {
catalog_name: &catalog_name,
})?;
let schema_provider =
catalog_provider
.schema(&schema_name)?
.with_context(|| SchemaNotFoundSnafu {
schema_info: format!("{}.{}", &catalog_name, &schema_name),
})?;
if schema_provider.table_exist(&request.table_name)? {
return TableExistsSnafu {
table: format!("{}.{}.{}", &catalog_name, &schema_name, &request.table_name),
}
.fail();
}
schema_provider.register_table(request.table_name, request.table)?;
Ok(1)
}
async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
let mut requests = self.system_table_requests.lock().await;
requests.push(request);
Ok(())
}
fn table(
&self,
catalog_name: &str,
schema_name: &str,
table_name: &str,
) -> Result<Option<TableRef>> {
let catalog = self
.catalog(catalog_name)?
.with_context(|| CatalogNotFoundSnafu { catalog_name })?;
let schema = catalog
.schema(schema_name)?
.with_context(|| SchemaNotFoundSnafu {
schema_info: format!("{}.{}", catalog_name, schema_name),
})?;
schema.table(table_name)
}
}
impl CatalogList for RemoteCatalogManager {
fn as_any(&self) -> &dyn Any {
self
}
fn register_catalog(
&self,
name: String,
catalog: CatalogProviderRef,
) -> Result<Option<CatalogProviderRef>> {
let key = self.build_catalog_key(&name).to_string();
let backend = self.backend.clone();
let mutex = self.mutex.clone();
let catalogs = self.catalogs.clone();
std::thread::spawn(|| {
common_runtime::block_on_write(async move {
let _guard = mutex.lock().await;
backend
.set(
key.as_bytes(),
&CatalogValue {}
.to_bytes()
.context(InvalidCatalogValueSnafu)?,
)
.await?;
let prev_catalogs = catalogs.load();
let mut new_catalogs = HashMap::with_capacity(prev_catalogs.len() + 1);
new_catalogs.clone_from(&prev_catalogs);
let prev = new_catalogs.insert(name, catalog);
catalogs.store(Arc::new(new_catalogs));
Ok(prev)
})
})
.join()
.unwrap()
}
/// Lists all catalogs loaded from metasrv.
fn catalog_names(&self) -> Result<Vec<String>> {
Ok(self.catalogs.load().keys().cloned().collect::<Vec<_>>())
}
/// Retrieves the catalog of the given name from those loaded from metasrv.
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>> {
Ok(self.catalogs.load().get(name).cloned())
}
}
pub struct RemoteCatalogProvider {
catalog_name: String,
node_id: u64,
backend: KvBackendRef,
schemas: Arc<ArcSwap<HashMap<String, SchemaProviderRef>>>,
mutex: Arc<Mutex<()>>,
}
impl RemoteCatalogProvider {
pub fn new(catalog_name: String, node_id: u64, backend: KvBackendRef) -> Self {
Self {
catalog_name,
node_id,
backend,
schemas: Default::default(),
mutex: Default::default(),
}
}
fn build_schema_key(&self, schema_name: impl AsRef<str>) -> SchemaKey {
SchemaKey {
catalog_name: self.catalog_name.clone(),
schema_name: schema_name.as_ref().to_string(),
node_id: self.node_id,
}
}
}
impl CatalogProvider for RemoteCatalogProvider {
fn as_any(&self) -> &dyn Any {
self
}
fn schema_names(&self) -> Result<Vec<String>> {
Ok(self.schemas.load().keys().cloned().collect::<Vec<_>>())
}
fn register_schema(
&self,
name: String,
schema: SchemaProviderRef,
) -> Result<Option<SchemaProviderRef>> {
let key = self.build_schema_key(&name).to_string();
let backend = self.backend.clone();
let mutex = self.mutex.clone();
let schemas = self.schemas.clone();
std::thread::spawn(|| {
common_runtime::block_on_write(async move {
let _guard = mutex.lock().await;
backend
.set(
key.as_bytes(),
&SchemaValue {}
.to_bytes()
.context(InvalidCatalogValueSnafu)?,
)
.await?;
let prev_schemas = schemas.load();
let mut new_schemas = HashMap::with_capacity(prev_schemas.len() + 1);
new_schemas.clone_from(&prev_schemas);
let prev_schema = new_schemas.insert(name, schema);
schemas.store(Arc::new(new_schemas));
Ok(prev_schema)
})
})
.join()
.unwrap()
}
fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>> {
Ok(self.schemas.load().get(name).cloned())
}
}
pub struct RemoteSchemaProvider {
catalog_name: String,
schema_name: String,
node_id: u64,
backend: KvBackendRef,
tables: Arc<ArcSwap<HashMap<String, TableRef>>>,
mutex: Arc<Mutex<()>>,
}
impl RemoteSchemaProvider {
pub fn new(
catalog_name: String,
schema_name: String,
node_id: u64,
backend: KvBackendRef,
) -> Self {
Self {
catalog_name,
schema_name,
node_id,
backend,
tables: Default::default(),
mutex: Default::default(),
}
}
fn build_table_key(
&self,
table_name: impl AsRef<str>,
table_version: TableVersion,
) -> TableKey {
TableKey {
catalog_name: self.catalog_name.clone(),
schema_name: self.schema_name.clone(),
table_name: table_name.as_ref().to_string(),
version: table_version,
node_id: self.node_id,
}
}
}
impl SchemaProvider for RemoteSchemaProvider {
fn as_any(&self) -> &dyn Any {
self
}
fn table_names(&self) -> Result<Vec<String>> {
Ok(self.tables.load().keys().cloned().collect::<Vec<_>>())
}
fn table(&self, name: &str) -> Result<Option<TableRef>> {
Ok(self.tables.load().get(name).cloned())
}
fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
let table_info = table.table_info();
let table_version = table_info.ident.version;
let table_value = TableValue {
meta: table_info.meta.clone().into(),
id: table_info.ident.table_id,
node_id: self.node_id,
regions_ids: vec![],
};
let backend = self.backend.clone();
let mutex = self.mutex.clone();
let tables = self.tables.clone();
let table_key = self
.build_table_key(name.clone(), table_version)
.to_string();
let prev = std::thread::spawn(move || {
common_runtime::block_on_read(async move {
let _guard = mutex.lock().await;
backend
.set(
table_key.as_bytes(),
&table_value.as_bytes().context(InvalidCatalogValueSnafu)?,
)
.await?;
debug!(
"Successfully set catalog table entry, key: {}, table value: {:?}",
table_key, table_value
);
let prev_tables = tables.load();
let mut new_tables = HashMap::with_capacity(prev_tables.len() + 1);
new_tables.clone_from(&prev_tables);
let prev = new_tables.insert(name, table);
tables.store(Arc::new(new_tables));
Ok(prev)
})
})
.join()
.unwrap();
prev
}
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
let table_version = match self.tables.load().get(name) {
None => return Ok(None),
Some(t) => t.table_info().ident.version,
};
let table_name = name.to_string();
let table_key = self.build_table_key(&table_name, table_version).to_string();
let backend = self.backend.clone();
let mutex = self.mutex.clone();
let tables = self.tables.clone();
let prev = std::thread::spawn(move || {
common_runtime::block_on_read(async move {
let _guard = mutex.lock().await;
backend.delete(table_key.as_bytes()).await?;
debug!(
"Successfully deleted catalog table entry, key: {}",
table_key
);
let prev_tables = tables.load();
let mut new_tables = HashMap::with_capacity(prev_tables.len() + 1);
new_tables.clone_from(&prev_tables);
let prev = new_tables.remove(&table_name);
tables.store(Arc::new(new_tables));
Ok(prev)
})
})
.join()
.unwrap();
prev
}
/// Checks if the table exists in this schema provider, based on the locally opened table map.
fn table_exist(&self, name: &str) -> Result<bool> {
Ok(self.tables.load().contains_key(name))
}
}
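Putting these pieces together, a datanode might wire up the remote catalog roughly as below (a sketch under assumptions: the table engine and `MetaClient` are built elsewhere, and the function name is illustrative):

use std::sync::Arc;

use catalog::error::Result;
use catalog::remote::{KvBackendRef, MetaKvBackend, RemoteCatalogManager};
use catalog::CatalogManager;
use meta_client::client::MetaClient;
use table::engine::TableEngineRef;

async fn build_remote_catalog(
    engine: TableEngineRef,
    meta_client: MetaClient,
    node_id: u64,
) -> Result<Arc<RemoteCatalogManager>> {
    // MetaKvBackend adapts the meta client to the KvBackend trait used by the catalog.
    let backend: KvBackendRef = Arc::new(MetaKvBackend { client: meta_client });
    let manager = Arc::new(RemoteCatalogManager::new(engine, node_id, backend));
    // `start` fetches catalogs/schemas/tables from metasrv and opens (or creates) the tables locally.
    manager.start().await?;
    Ok(manager)
}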


@@ -12,10 +12,10 @@ pub trait SchemaProvider: Sync + Send {
fn as_any(&self) -> &dyn Any;
/// Retrieves the list of available table names in this schema.
fn table_names(&self) -> Vec<String>;
fn table_names(&self) -> Result<Vec<String>>;
/// Retrieves a specific table from the schema by name, provided it exists.
fn table(&self, name: &str) -> Option<TableRef>;
fn table(&self, name: &str) -> Result<Option<TableRef>>;
/// If supported by the implementation, adds a new table to this schema.
/// If a table of the same name existed before, it returns a "Table already exists" error.
@@ -28,7 +28,7 @@ pub trait SchemaProvider: Sync + Send {
/// If supported by the implementation, checks whether the table exists in the schema provider.
/// If there is no matching table in the schema provider, returns false.
/// Otherwise, returns true.
fn table_exist(&self, name: &str) -> bool;
fn table_exist(&self, name: &str) -> Result<bool>;
}
pub type SchemaProviderRef = Arc<dyn SchemaProvider>;


@@ -2,26 +2,29 @@ use std::any::Any;
use std::collections::HashMap;
use std::sync::Arc;
use common_query::logical_plan::Expr;
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::debug;
use common_time::util;
use datatypes::prelude::{ConcreteDataType, ScalarVector};
use datatypes::schema::{ColumnSchema, Schema, SchemaBuilder, SchemaRef};
use datatypes::vectors::{BinaryVector, Int64Vector, UInt8Vector};
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::requests::{CreateTableRequest, InsertRequest, OpenTableRequest};
use table::{Table, TableRef};
use crate::consts::{
use common_catalog::consts::{
INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_ID,
SYSTEM_CATALOG_TABLE_NAME,
};
use common_query::logical_plan::Expr;
use common_query::physical_plan::PhysicalPlanRef;
use common_query::physical_plan::RuntimeEnv;
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::debug;
use common_time::timestamp::Timestamp;
use common_time::util;
use datatypes::prelude::{ConcreteDataType, ScalarVector};
use datatypes::schema::{ColumnSchema, Schema, SchemaBuilder, SchemaRef};
use datatypes::vectors::{BinaryVector, TimestampVector, UInt8Vector};
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::{TableId, TableInfoRef};
use table::requests::{CreateTableRequest, InsertRequest, OpenTableRequest};
use table::{Table, TableRef};
use crate::error::{
CreateSystemCatalogSnafu, EmptyValueSnafu, Error, InvalidEntryTypeSnafu, InvalidKeySnafu,
self, CreateSystemCatalogSnafu, EmptyValueSnafu, Error, InvalidEntryTypeSnafu, InvalidKeySnafu,
OpenSystemCatalogSnafu, Result, ValueDeserializeSnafu,
};
@@ -31,7 +34,7 @@ pub const TIMESTAMP_INDEX: usize = 2;
pub const VALUE_INDEX: usize = 3;
pub struct SystemCatalogTable {
schema: SchemaRef,
table_info: TableInfoRef,
pub table: TableRef,
}
@@ -42,7 +45,7 @@ impl Table for SystemCatalogTable {
}
fn schema(&self) -> SchemaRef {
self.schema.clone()
self.table_info.meta.schema.clone()
}
async fn scan(
@@ -50,7 +53,7 @@ impl Table for SystemCatalogTable {
_projection: &Option<Vec<usize>>,
_filters: &[Expr],
_limit: Option<usize>,
) -> table::Result<SendableRecordBatchStream> {
) -> table::Result<PhysicalPlanRef> {
panic!("System catalog table does not support scan!")
}
@@ -58,6 +61,10 @@ impl Table for SystemCatalogTable {
async fn insert(&self, request: InsertRequest) -> table::error::Result<usize> {
self.table.insert(request).await
}
fn table_info(&self) -> TableInfoRef {
self.table_info.clone()
}
}
impl SystemCatalogTable {
@@ -76,32 +83,45 @@ impl SystemCatalogTable {
.await
.context(OpenSystemCatalogSnafu)?
{
Ok(Self { table, schema })
Ok(Self {
table_info: table.table_info(),
table,
})
} else {
// system catalog table is not yet created, try to create
let request = CreateTableRequest {
id: SYSTEM_CATALOG_TABLE_ID,
catalog_name: Some(SYSTEM_CATALOG_NAME.to_string()),
schema_name: Some(INFORMATION_SCHEMA_NAME.to_string()),
catalog_name: SYSTEM_CATALOG_NAME.to_string(),
schema_name: INFORMATION_SCHEMA_NAME.to_string(),
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
desc: Some("System catalog table".to_string()),
schema: schema.clone(),
primary_key_indices: vec![ENTRY_TYPE_INDEX, KEY_INDEX, TIMESTAMP_INDEX],
create_if_not_exists: true,
table_options: HashMap::new(),
};
let table = engine
.create_table(&ctx, request)
.await
.context(CreateSystemCatalogSnafu)?;
Ok(Self { table, schema })
let table_info = table.table_info();
Ok(Self { table, table_info })
}
}
/// Creates a stream of all entries inside the system catalog table.
pub async fn records(&self) -> Result<SendableRecordBatchStream> {
let full_projection = None;
let stream = self.table.scan(&full_projection, &[], None).await.unwrap();
let scan = self
.table
.scan(&full_projection, &[], None)
.await
.context(error::SystemCatalogTableScanSnafu)?;
let stream = scan
.execute(0, Arc::new(RuntimeEnv::default()))
.await
.context(error::SystemCatalogTableScanExecSnafu)?;
Ok(stream)
}
}
@@ -128,7 +148,7 @@ fn build_system_catalog_schema() -> Schema {
),
ColumnSchema::new(
"timestamp".to_string(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::timestamp_millis_datatype(),
false,
),
ColumnSchema::new(
@@ -138,19 +158,20 @@ fn build_system_catalog_schema() -> Schema {
),
ColumnSchema::new(
"gmt_created".to_string(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::timestamp_millis_datatype(),
false,
),
ColumnSchema::new(
"gmt_modified".to_string(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::timestamp_millis_datatype(),
false,
),
];
// The schema of this table must be valid.
SchemaBuilder::from(cols)
.timestamp_index(2)
SchemaBuilder::try_from(cols)
.unwrap()
.timestamp_index(Some(2))
.build()
.unwrap()
}
@@ -170,7 +191,7 @@ pub fn build_table_insert_request(full_table_name: String, table_id: TableId) ->
// Timestamp in key part is intentionally left to 0
columns_values.insert(
"timestamp".to_string(),
Arc::new(Int64Vector::from_slice(&[0])) as _,
Arc::new(TimestampVector::from_slice(&[Timestamp::from_millis(0)])) as _,
);
columns_values.insert(
@@ -184,12 +205,16 @@ pub fn build_table_insert_request(full_table_name: String, table_id: TableId) ->
columns_values.insert(
"gmt_created".to_string(),
Arc::new(Int64Vector::from_slice(&[util::current_time_millis()])) as _,
Arc::new(TimestampVector::from_slice(&[Timestamp::from_millis(
util::current_time_millis(),
)])) as _,
);
columns_values.insert(
"gmt_modified".to_string(),
Arc::new(Int64Vector::from_slice(&[util::current_time_millis()])) as _,
Arc::new(TimestampVector::from_slice(&[Timestamp::from_millis(
util::current_time_millis(),
)])) as _,
);
InsertRequest {
@@ -313,6 +338,16 @@ pub struct TableEntryValue {
#[cfg(test)]
mod tests {
use log_store::fs::noop::NoopLogStore;
use object_store::ObjectStore;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
use table::metadata::TableType;
use table::metadata::TableType::Base;
use table_engine::config::EngineConfig;
use table_engine::engine::MitoEngine;
use tempdir::TempDir;
use super::*;
#[test]
@@ -384,4 +419,43 @@ mod tests {
assert_eq!(EntryType::Table, EntryType::try_from(3).unwrap());
assert!(EntryType::try_from(4).is_err());
}
pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
let dir = TempDir::new("system-table-test").unwrap();
let store_dir = dir.path().to_string_lossy();
let accessor = opendal::services::fs::Builder::default()
.root(&store_dir)
.build()
.unwrap();
let object_store = ObjectStore::new(accessor);
let table_engine = Arc::new(MitoEngine::new(
EngineConfig::default(),
EngineImpl::new(
StorageEngineConfig::default(),
Arc::new(NoopLogStore::default()),
object_store.clone(),
),
object_store,
));
(dir, table_engine)
}
#[tokio::test]
async fn test_system_table_type() {
let (_dir, table_engine) = prepare_table_engine().await;
let system_table = SystemCatalogTable::new(table_engine).await.unwrap();
assert_eq!(Base, system_table.table_type());
}
#[tokio::test]
async fn test_system_table_info() {
let (_dir, table_engine) = prepare_table_engine().await;
let system_table = SystemCatalogTable::new(table_engine).await.unwrap();
let info = system_table.table_info();
assert_eq!(TableType::Base, info.table_type);
assert_eq!(SYSTEM_CATALOG_TABLE_NAME, info.name);
assert_eq!(SYSTEM_CATALOG_TABLE_ID, info.ident.table_id);
assert_eq!(SYSTEM_CATALOG_NAME, info.catalog_name);
assert_eq!(INFORMATION_SCHEMA_NAME, info.schema_name);
}
}


@@ -6,9 +6,12 @@ use std::sync::Arc;
use std::task::{Context, Poll};
use async_stream::stream;
use common_catalog::consts::{INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_TABLE_NAME};
use common_error::ext::BoxedError;
use common_query::logical_plan::Expr;
use common_query::physical_plan::PhysicalPlanRef;
use common_recordbatch::error::Result as RecordBatchResult;
use common_recordbatch::{RecordBatch, RecordBatchStream, SendableRecordBatchStream};
use common_recordbatch::{RecordBatch, RecordBatchStream};
use datatypes::prelude::{ConcreteDataType, VectorBuilder};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
@@ -16,11 +19,12 @@ use datatypes::vectors::VectorRef;
use futures::Stream;
use snafu::ResultExt;
use table::engine::TableEngineRef;
use table::metadata::TableId;
use table::error::TablesRecordBatchSnafu;
use table::metadata::{TableId, TableInfoRef};
use table::table::scan::SimpleTableScan;
use table::{Table, TableRef};
use crate::consts::{INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_TABLE_NAME};
use crate::error::InsertTableRecordSnafu;
use crate::error::{Error, InsertTableRecordSnafu};
use crate::system::{build_table_insert_request, SystemCatalogTable};
use crate::{
format_full_table_name, CatalogListRef, CatalogProvider, SchemaProvider, SchemaProviderRef,
@@ -53,23 +57,53 @@ impl Table for Tables {
self.schema.clone()
}
fn table_info(&self) -> TableInfoRef {
unreachable!("Tables does not support table_info method")
}
async fn scan(
&self,
_projection: &Option<Vec<usize>>,
_filters: &[Expr],
_limit: Option<usize>,
) -> table::error::Result<SendableRecordBatchStream> {
) -> table::error::Result<PhysicalPlanRef> {
let catalogs = self.catalogs.clone();
let schema_ref = self.schema.clone();
let engine_name = self.engine_name.clone();
let stream = stream!({
for catalog_name in catalogs.catalog_names() {
let catalog = catalogs.catalog(&catalog_name).unwrap();
for schema_name in catalog.schema_names() {
let mut tables_in_schema = Vec::with_capacity(catalog.schema_names().len());
let schema = catalog.schema(&schema_name).unwrap();
for table_name in schema.table_names() {
for catalog_name in catalogs
.catalog_names()
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)?
{
let catalog = catalogs
.catalog(&catalog_name)
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)?
.unwrap();
for schema_name in catalog
.schema_names()
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)?
{
let mut tables_in_schema = Vec::with_capacity(
catalog
.schema_names()
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)?
.len(),
);
let schema = catalog
.schema(&schema_name)
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)?
.unwrap();
for table_name in schema
.table_names()
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)?
{
tables_in_schema.push(table_name);
}
@@ -85,10 +119,11 @@ impl Table for Tables {
}
});
Ok(Box::pin(TablesRecordBatchStream {
let stream = Box::pin(TablesRecordBatchStream {
schema: self.schema.clone(),
stream: Box::pin(stream),
}))
});
Ok(Arc::new(SimpleTableScan::new(stream)))
}
}
@@ -152,17 +187,20 @@ impl SchemaProvider for InformationSchema {
self
}
fn table_names(&self) -> Vec<String> {
vec!["tables".to_string(), SYSTEM_CATALOG_TABLE_NAME.to_string()]
fn table_names(&self) -> Result<Vec<String>, Error> {
Ok(vec![
"tables".to_string(),
SYSTEM_CATALOG_TABLE_NAME.to_string(),
])
}
fn table(&self, name: &str) -> Option<TableRef> {
fn table(&self, name: &str) -> Result<Option<TableRef>, Error> {
if name.eq_ignore_ascii_case("tables") {
Some(self.tables.clone())
Ok(Some(self.tables.clone()))
} else if name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME) {
Some(self.system.clone())
Ok(Some(self.system.clone()))
} else {
None
Ok(None)
}
}
@@ -178,8 +216,9 @@ impl SchemaProvider for InformationSchema {
panic!("System catalog & schema does not support deregister table")
}
fn table_exist(&self, name: &str) -> bool {
name.eq_ignore_ascii_case("tables") || name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME)
fn table_exist(&self, name: &str) -> Result<bool, Error> {
Ok(name.eq_ignore_ascii_case("tables")
|| name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME))
}
}
@@ -224,23 +263,23 @@ impl CatalogProvider for SystemCatalog {
self
}
fn schema_names(&self) -> Vec<String> {
vec![INFORMATION_SCHEMA_NAME.to_string()]
fn schema_names(&self) -> Result<Vec<String>, Error> {
Ok(vec![INFORMATION_SCHEMA_NAME.to_string()])
}
fn register_schema(
&self,
_name: String,
_schema: SchemaProviderRef,
) -> Option<SchemaProviderRef> {
) -> Result<Option<SchemaProviderRef>, Error> {
panic!("System catalog does not support registering schema!")
}
fn schema(&self, name: &str) -> Option<Arc<dyn SchemaProvider>> {
fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>, Error> {
if name.eq_ignore_ascii_case(INFORMATION_SCHEMA_NAME) {
Some(self.information_schema.clone())
Ok(Some(self.information_schema.clone()))
} else {
None
Ok(None)
}
}
}
@@ -273,13 +312,16 @@ fn build_schema_for_tables() -> Schema {
#[cfg(test)]
mod tests {
use common_query::physical_plan::RuntimeEnv;
use datatypes::arrow::array::Utf8Array;
use datatypes::arrow::datatypes::DataType;
use futures_util::StreamExt;
use table::table::numbers::NumbersTable;
use super::*;
use crate::memory::{new_memory_catalog_list, MemoryCatalogProvider, MemorySchemaProvider};
use crate::local::memory::{
new_memory_catalog_list, MemoryCatalogProvider, MemorySchemaProvider,
};
use crate::CatalogList;
#[tokio::test]
@@ -290,25 +332,33 @@ mod tests {
schema
.register_table("test_table".to_string(), Arc::new(NumbersTable::default()))
.unwrap();
catalog_provider.register_schema("test_schema".to_string(), schema);
catalog_list.register_catalog("test_catalog".to_string(), catalog_provider);
catalog_provider
.register_schema("test_schema".to_string(), schema)
.unwrap();
catalog_list
.register_catalog("test_catalog".to_string(), catalog_provider)
.unwrap();
let tables = Tables::new(catalog_list, "test_engine".to_string());
let mut tables_stream = tables.scan(&None, &[], None).await.unwrap();
let tables_stream = tables.scan(&None, &[], None).await.unwrap();
let mut tables_stream = tables_stream
.execute(0, Arc::new(RuntimeEnv::default()))
.await
.unwrap();
if let Some(t) = tables_stream.next().await {
let batch = t.unwrap().df_recordbatch;
assert_eq!(1, batch.num_rows());
assert_eq!(4, batch.num_columns());
assert_eq!(&DataType::LargeUtf8, batch.column(0).data_type());
assert_eq!(&DataType::LargeUtf8, batch.column(1).data_type());
assert_eq!(&DataType::LargeUtf8, batch.column(2).data_type());
assert_eq!(&DataType::LargeUtf8, batch.column(3).data_type());
assert_eq!(&DataType::Utf8, batch.column(0).data_type());
assert_eq!(&DataType::Utf8, batch.column(1).data_type());
assert_eq!(&DataType::Utf8, batch.column(2).data_type());
assert_eq!(&DataType::Utf8, batch.column(3).data_type());
assert_eq!(
"test_catalog",
batch
.column(0)
.as_any()
.downcast_ref::<Utf8Array<i64>>()
.downcast_ref::<Utf8Array<i32>>()
.unwrap()
.value(0)
);
@@ -318,7 +368,7 @@ mod tests {
batch
.column(1)
.as_any()
.downcast_ref::<Utf8Array<i64>>()
.downcast_ref::<Utf8Array<i32>>()
.unwrap()
.value(0)
);
@@ -328,7 +378,7 @@ mod tests {
batch
.column(2)
.as_any()
.downcast_ref::<Utf8Array<i64>>()
.downcast_ref::<Utf8Array<i32>>()
.unwrap()
.value(0)
);
@@ -338,7 +388,7 @@ mod tests {
batch
.column(3)
.as_any()
.downcast_ref::<Utf8Array<i64>>()
.downcast_ref::<Utf8Array<i32>>()
.unwrap()
.value(0)
);

src/catalog/tests/mock.rs (new file, 163 lines)

@@ -0,0 +1,163 @@
use std::collections::{BTreeMap, HashMap};
use std::fmt::{Display, Formatter};
use std::str::FromStr;
use std::sync::Arc;
use async_stream::stream;
use catalog::error::Error;
use catalog::remote::{Kv, KvBackend, ValueIter};
use common_recordbatch::RecordBatch;
use common_telemetry::logging::info;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::vectors::StringVector;
use serde::Serializer;
use table::engine::{EngineContext, TableEngine};
use table::metadata::TableId;
use table::requests::{AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest};
use table::test_util::MemTable;
use table::TableRef;
use tokio::sync::RwLock;
#[derive(Default)]
pub struct MockKvBackend {
map: RwLock<BTreeMap<Vec<u8>, Vec<u8>>>,
}
impl Display for MockKvBackend {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
futures::executor::block_on(async {
let map = self.map.read().await;
for (k, v) in map.iter() {
f.serialize_str(&String::from_utf8_lossy(k))?;
f.serialize_str(" -> ")?;
f.serialize_str(&String::from_utf8_lossy(v))?;
f.serialize_str("\n")?;
}
Ok(())
})
}
}
#[async_trait::async_trait]
impl KvBackend for MockKvBackend {
fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
where
'a: 'b,
{
let prefix = key.to_vec();
let prefix_string = String::from_utf8_lossy(&prefix).to_string();
Box::pin(stream!({
let maps = self.map.read().await.clone();
for (k, v) in maps.range(prefix.clone()..) {
let key_string = String::from_utf8_lossy(k).to_string();
let matches = key_string.starts_with(&prefix_string);
if matches {
yield Ok(Kv(k.clone(), v.clone()))
} else {
info!("Stream finished");
return;
}
}
}))
}
async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Error> {
let mut map = self.map.write().await;
map.insert(key.to_vec(), val.to_vec());
Ok(())
}
async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Error> {
let start = key.to_vec();
let end = end.to_vec();
let range = start..end;
let mut map = self.map.write().await;
map.retain(|k, _| !range.contains(k));
Ok(())
}
}
#[derive(Default)]
pub struct MockTableEngine {
tables: RwLock<HashMap<String, TableRef>>,
}
#[async_trait::async_trait]
impl TableEngine for MockTableEngine {
fn name(&self) -> &str {
"MockTableEngine"
}
/// Create a table with only one column
async fn create_table(
&self,
_ctx: &EngineContext,
request: CreateTableRequest,
) -> table::Result<TableRef> {
let table_name = request.table_name.clone();
let catalog_name = request.catalog_name.clone();
let schema_name = request.schema_name.clone();
let default_table_id = "0".to_owned();
let table_id = TableId::from_str(
request
.table_options
.get("table_id")
.unwrap_or(&default_table_id),
)
.unwrap();
let schema = Arc::new(Schema::new(vec![ColumnSchema::new(
"name",
ConcreteDataType::string_datatype(),
true,
)]));
let data = vec![Arc::new(StringVector::from(vec!["a", "b", "c"])) as _];
let record_batch = RecordBatch::new(schema, data).unwrap();
let table: TableRef = Arc::new(MemTable::new_with_catalog(
&table_name,
record_batch,
table_id,
catalog_name,
schema_name,
)) as Arc<_>;
let mut tables = self.tables.write().await;
tables.insert(table_name, table.clone() as TableRef);
Ok(table)
}
async fn open_table(
&self,
_ctx: &EngineContext,
request: OpenTableRequest,
) -> table::Result<Option<TableRef>> {
Ok(self.tables.read().await.get(&request.table_name).cloned())
}
async fn alter_table(
&self,
_ctx: &EngineContext,
_request: AlterTableRequest,
) -> table::Result<TableRef> {
unimplemented!()
}
fn get_table(&self, _ctx: &EngineContext, name: &str) -> table::Result<Option<TableRef>> {
futures::executor::block_on(async { Ok(self.tables.read().await.get(name).cloned()) })
}
fn table_exists(&self, _ctx: &EngineContext, name: &str) -> bool {
futures::executor::block_on(async { self.tables.read().await.contains_key(name) })
}
async fn drop_table(
&self,
_ctx: &EngineContext,
_request: DropTableRequest,
) -> table::Result<()> {
unimplemented!()
}
}
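Below is a minimal usage sketch for the mock backend above; it is illustrative only and assumes nothing beyond the `KvBackend` methods shown in this file.

use catalog::error::Error;
use futures_util::StreamExt;

// Store one catalog entry, then scan it back by prefix, mirroring `test_backend` below.
async fn dump_catalog_keys(backend: &MockKvBackend) -> Result<(), Error> {
    // The mock accepts arbitrary bytes for the value.
    backend.set(b"__c-greptime-42", b"{}").await?;
    let mut iter = backend.range(b"__c-");
    while let Some(kv) = iter.next().await {
        let kv = kv?;
        println!("{}", String::from_utf8_lossy(&kv.0));
    }
    Ok(())
}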


@@ -0,0 +1,274 @@
#![feature(assert_matches)]
mod mock;
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::collections::HashSet;
use std::sync::Arc;
use catalog::remote::{
KvBackend, KvBackendRef, RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider,
};
use catalog::{CatalogManager, CatalogManagerRef, RegisterTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_catalog::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
use datatypes::schema::Schema;
use futures_util::StreamExt;
use table::engine::{EngineContext, TableEngineRef};
use table::requests::CreateTableRequest;
use crate::mock::{MockKvBackend, MockTableEngine};
#[tokio::test]
async fn test_backend() {
common_telemetry::init_default_ut_logging();
let node_id = 42;
let backend = MockKvBackend::default();
let default_catalog_key = CatalogKey {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
node_id,
}
.to_string();
backend
.set(
default_catalog_key.as_bytes(),
&CatalogValue {}.to_bytes().unwrap(),
)
.await
.unwrap();
let schema_key = SchemaKey {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
node_id,
}
.to_string();
backend
.set(schema_key.as_bytes(), &SchemaValue {}.to_bytes().unwrap())
.await
.unwrap();
let mut iter = backend.range("__c-".as_bytes());
let mut res = HashSet::new();
while let Some(r) = iter.next().await {
let kv = r.unwrap();
res.insert(String::from_utf8_lossy(&kv.0).to_string());
}
assert_eq!(
vec!["__c-greptime-42".to_string()],
res.into_iter().collect::<Vec<_>>()
);
}
async fn prepare_components(node_id: u64) -> (KvBackendRef, TableEngineRef, CatalogManagerRef) {
let backend = Arc::new(MockKvBackend::default()) as KvBackendRef;
let table_engine = Arc::new(MockTableEngine::default());
let catalog_manager =
RemoteCatalogManager::new(table_engine.clone(), node_id, backend.clone());
catalog_manager.start().await.unwrap();
(backend, table_engine, Arc::new(catalog_manager))
}
#[tokio::test]
async fn test_remote_catalog_default() {
common_telemetry::init_default_ut_logging();
let node_id = 42;
let (_, _, catalog_manager) = prepare_components(node_id).await;
assert_eq!(
vec![DEFAULT_CATALOG_NAME.to_string()],
catalog_manager.catalog_names().unwrap()
);
let default_catalog = catalog_manager
.catalog(DEFAULT_CATALOG_NAME)
.unwrap()
.unwrap();
assert_eq!(
vec![DEFAULT_SCHEMA_NAME.to_string()],
default_catalog.schema_names().unwrap()
);
}
#[tokio::test]
async fn test_remote_catalog_register_nonexistent() {
common_telemetry::init_default_ut_logging();
let node_id = 42;
let (_, table_engine, catalog_manager) = prepare_components(node_id).await;
// register a new table with a nonexistent catalog
let catalog_name = "nonexistent_catalog".to_string();
let schema_name = "nonexistent_schema".to_string();
let table_name = "fail_table".to_string();
// the table schema passed here has no effect (the mock engine builds its own)
let table_schema = Arc::new(Schema::new(vec![]));
let table = table_engine
.create_table(
&EngineContext {},
CreateTableRequest {
id: 1,
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
schema: table_schema.clone(),
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
},
)
.await
.unwrap();
let reg_req = RegisterTableRequest {
catalog: catalog_name,
schema: schema_name,
table_name,
table_id: 1,
table,
};
let res = catalog_manager.register_table(reg_req).await;
// The registration must fail because nonexistent_catalog does not exist yet.
assert_matches!(
res.err().unwrap(),
catalog::error::Error::CatalogNotFound { .. }
);
}
#[tokio::test]
async fn test_register_table() {
let node_id = 42;
let (_, table_engine, catalog_manager) = prepare_components(node_id).await;
let default_catalog = catalog_manager
.catalog(DEFAULT_CATALOG_NAME)
.unwrap()
.unwrap();
assert_eq!(
vec![DEFAULT_SCHEMA_NAME.to_string()],
default_catalog.schema_names().unwrap()
);
let default_schema = default_catalog
.schema(DEFAULT_SCHEMA_NAME)
.unwrap()
.unwrap();
assert_eq!(Vec::<String>::new(), default_schema.table_names().unwrap());
// register a new table in the default catalog and schema
let catalog_name = DEFAULT_CATALOG_NAME.to_string();
let schema_name = DEFAULT_SCHEMA_NAME.to_string();
let table_name = "test_table".to_string();
let table_id = 1;
// the table schema passed here has no effect (the mock engine builds its own)
let table_schema = Arc::new(Schema::new(vec![]));
let table = table_engine
.create_table(
&EngineContext {},
CreateTableRequest {
id: table_id,
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
schema: table_schema.clone(),
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
},
)
.await
.unwrap();
let reg_req = RegisterTableRequest {
catalog: catalog_name,
schema: schema_name,
table_name: table_name.clone(),
table_id,
table,
};
assert_eq!(1, catalog_manager.register_table(reg_req).await.unwrap());
assert_eq!(vec![table_name], default_schema.table_names().unwrap());
}
#[tokio::test]
async fn test_register_catalog_schema_table() {
let node_id = 42;
let (backend, table_engine, catalog_manager) = prepare_components(node_id).await;
let catalog_name = "test_catalog".to_string();
let schema_name = "nonexistent_schema".to_string();
let catalog = Arc::new(RemoteCatalogProvider::new(
catalog_name.clone(),
node_id,
backend.clone(),
));
// register the catalog with the catalog manager
catalog_manager
.register_catalog(catalog_name.clone(), catalog)
.unwrap();
assert_eq!(
HashSet::<String>::from_iter(
vec![DEFAULT_CATALOG_NAME.to_string(), catalog_name.clone()].into_iter()
),
HashSet::from_iter(catalog_manager.catalog_names().unwrap().into_iter())
);
let table_to_register = table_engine
.create_table(
&EngineContext {},
CreateTableRequest {
id: 2,
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: "".to_string(),
desc: None,
schema: Arc::new(Schema::new(vec![])),
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
},
)
.await
.unwrap();
let reg_req = RegisterTableRequest {
catalog: catalog_name.clone(),
schema: schema_name.clone(),
table_name: " fail_table".to_string(),
table_id: 2,
table: table_to_register,
};
// this registration will fail since the schema does not exist yet
assert_matches!(
catalog_manager
.register_table(reg_req.clone())
.await
.unwrap_err(),
catalog::error::Error::SchemaNotFound { .. }
);
let new_catalog = catalog_manager
.catalog(&catalog_name)
.unwrap()
.expect("catalog should exist since it's already registered");
let schema = Arc::new(RemoteSchemaProvider::new(
catalog_name.clone(),
schema_name.clone(),
node_id,
backend.clone(),
));
let prev = new_catalog
.register_schema(schema_name.clone(), schema.clone())
.expect("Register schema should not fail");
assert!(prev.is_none());
assert_eq!(1, catalog_manager.register_table(reg_req).await.unwrap());
assert_eq!(
HashSet::from([schema_name.clone()]),
new_catalog.schema_names().unwrap().into_iter().collect()
)
}
}


@@ -2,18 +2,39 @@
name = "client"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
api = { path = "../api" }
async-stream = "0.3"
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = ["simd"] }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-time = { path = "../common/time" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = [
"simd",
] }
datatypes = { path = "../datatypes" }
enum_dispatch = "0.3"
parking_lot = "0.12"
rand = "0.8"
snafu = { version = "0.7", features = ["backtraces"] }
tonic = "0.8"
[dev-dependencies]
datanode = { path = "../datanode" }
substrait = { path = "../common/substrait" }
tokio = { version = "1.0", features = ["full"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
# TODO(ruihang): upgrade to 0.11 once substrait-rs supports it.
[dev-dependencies.prost_09]
package = "prost"
version = "0.9"
[dev-dependencies.substrait_proto]
package = "substrait"
version = "0.2"


@@ -1,3 +1,5 @@
use std::collections::HashMap;
use api::v1::{codec::InsertBatch, *};
use client::{Client, Database};
@@ -10,10 +12,17 @@ fn main() {
#[tokio::main]
async fn run() {
let client = Client::connect("http://127.0.0.1:3001").await.unwrap();
let client = Client::with_urls(vec!["127.0.0.1:3001"]);
let db = Database::new("greptime", client);
db.insert("demo", insert_batches()).await.unwrap();
let expr = InsertExpr {
table_name: "demo".to_string(),
expr: Some(insert_expr::Expr::Values(insert_expr::Values {
values: insert_batches(),
})),
options: HashMap::default(),
};
db.insert(expr).await.unwrap();
}
fn insert_batches() -> Vec<Vec<u8>> {
@@ -37,6 +46,7 @@ fn insert_batches() -> Vec<Vec<u8>> {
semantic_type: SEMANTIC_TAG,
values: Some(host_vals),
null_mask: vec![0],
..Default::default()
};
let cpu_vals = column::Values {
@@ -48,6 +58,7 @@ fn insert_batches() -> Vec<Vec<u8>> {
semantic_type: SEMANTIC_FEILD,
values: Some(cpu_vals),
null_mask: vec![2],
..Default::default()
};
let mem_vals = column::Values {
@@ -59,6 +70,7 @@ fn insert_batches() -> Vec<Vec<u8>> {
semantic_type: SEMANTIC_FEILD,
values: Some(mem_vals),
null_mask: vec![4],
..Default::default()
};
let ts_vals = column::Values {
@@ -70,6 +82,7 @@ fn insert_batches() -> Vec<Vec<u8>> {
semantic_type: SEMANTIC_TS,
values: Some(ts_vals),
null_mask: vec![0],
..Default::default()
};
let insert_batch = InsertBatch {


@@ -0,0 +1,96 @@
use api::v1::{ColumnDataType, ColumnDef, CreateExpr};
use client::{admin::Admin, Client, Database};
use prost_09::Message;
use substrait_proto::protobuf::{
plan_rel::RelType as PlanRelType,
read_rel::{NamedTable, ReadType},
rel::RelType,
PlanRel, ReadRel, Rel,
};
use tracing::{event, Level};
fn main() {
tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
.unwrap();
run();
}
#[tokio::main]
async fn run() {
let client = Client::with_urls(vec!["127.0.0.1:3001"]);
let create_table_expr = CreateExpr {
catalog_name: Some("greptime".to_string()),
schema_name: Some("public".to_string()),
table_name: "test_logical_dist_exec".to_string(),
desc: None,
column_defs: vec![
ColumnDef {
name: "timestamp".to_string(),
datatype: ColumnDataType::Timestamp as i32,
is_nullable: false,
default_constraint: None,
},
ColumnDef {
name: "key".to_string(),
datatype: ColumnDataType::Uint64 as i32,
is_nullable: false,
default_constraint: None,
},
ColumnDef {
name: "value".to_string(),
datatype: ColumnDataType::Uint64 as i32,
is_nullable: false,
default_constraint: None,
},
],
time_index: "timestamp".to_string(),
primary_keys: vec!["key".to_string()],
create_if_not_exists: false,
table_options: Default::default(),
};
let admin = Admin::new("create table", client.clone());
let result = admin.create(create_table_expr).await.unwrap();
event!(Level::INFO, "create table result: {:#?}", result);
let logical = mock_logical_plan();
event!(Level::INFO, "plan size: {:#?}", logical.len());
let db = Database::new("greptime", client);
let result = db.logical_plan(logical).await.unwrap();
event!(Level::INFO, "result: {:#?}", result);
}
fn mock_logical_plan() -> Vec<u8> {
let catalog_name = "greptime".to_string();
let schema_name = "public".to_string();
let table_name = "test_logical_dist_exec".to_string();
let named_table = NamedTable {
names: vec![catalog_name, schema_name, table_name],
advanced_extension: None,
};
let read_type = ReadType::NamedTable(named_table);
let read_rel = ReadRel {
common: None,
base_schema: None,
filter: None,
projection: None,
advanced_extension: None,
read_type: Some(read_type),
};
let mut buf = vec![];
let rel = Rel {
rel_type: Some(RelType::Read(Box::new(read_rel))),
};
let plan_rel = PlanRel {
rel_type: Some(PlanRelType::Rel(rel)),
};
plan_rel.encode(&mut buf).unwrap();
buf
}


@@ -16,7 +16,7 @@ fn main() {
#[tokio::main]
async fn run() {
let client = Client::connect("http://127.0.0.1:3001").await.unwrap();
let client = Client::with_urls(vec!["127.0.0.1:3001"]);
let db = Database::new("greptime", client);
let physical = mock_physical_plan();


@@ -10,7 +10,7 @@ fn main() {
#[tokio::main]
async fn run() {
let client = Client::connect("http://127.0.0.1:3001").await.unwrap();
let client = Client::with_urls(vec!["127.0.0.1:3001"]);
let db = Database::new("greptime", client);
let sql = Select::Sql("select * from demo".to_string());


@@ -1,14 +1,102 @@
use api::v1::*;
use common_error::prelude::StatusCode;
use common_query::Output;
use snafu::prelude::*;
use crate::database::PROTOCOL_VERSION;
use crate::error;
use crate::Client;
use crate::Result;
#[derive(Clone, Debug)]
pub struct Admin {
name: String,
client: Client,
}
impl Admin {
pub fn new(client: Client) -> Self {
Self { client }
pub fn new(name: impl Into<String>, client: Client) -> Self {
Self {
name: name.into(),
client,
}
}
// TODO(jiachun): admin api
pub async fn create(&self, expr: CreateExpr) -> Result<AdminResult> {
let header = ExprHeader {
version: PROTOCOL_VERSION,
};
let expr = AdminExpr {
header: Some(header),
expr: Some(admin_expr::Expr::Create(expr)),
};
self.do_request(expr).await
}
pub async fn do_request(&self, expr: AdminExpr) -> Result<AdminResult> {
// `remove(0)` is safe: `do_requests` returns exactly one result per input expr.
Ok(self.do_requests(vec![expr]).await?.remove(0))
}
pub async fn alter(&self, expr: AlterExpr) -> Result<AdminResult> {
let header = ExprHeader {
version: PROTOCOL_VERSION,
};
let expr = AdminExpr {
header: Some(header),
expr: Some(admin_expr::Expr::Alter(expr)),
};
Ok(self.do_requests(vec![expr]).await?.remove(0))
}
/// Invariants: the lengths of input vec (`Vec<AdminExpr>`) and output vec (`Vec<AdminResult>`) are equal.
async fn do_requests(&self, exprs: Vec<AdminExpr>) -> Result<Vec<AdminResult>> {
let expr_count = exprs.len();
let req = AdminRequest {
name: self.name.clone(),
exprs,
};
let resp = self.client.admin(req).await?;
let results = resp.results;
ensure!(
results.len() == expr_count,
error::MissingResultSnafu {
name: "admin_results",
expected: expr_count,
actual: results.len(),
}
);
Ok(results)
}
}
pub fn admin_result_to_output(admin_result: AdminResult) -> Result<Output> {
let header = admin_result.header.context(error::MissingHeaderSnafu)?;
if !StatusCode::is_success(header.code) {
return error::DatanodeSnafu {
code: header.code,
msg: header.err_msg,
}
.fail();
}
let result = admin_result.result.context(error::MissingResultSnafu {
name: "result".to_string(),
expected: 1_usize,
actual: 0_usize,
})?;
let output = match result {
admin_result::Result::Mutate(mutate) => {
if mutate.failure != 0 {
return error::MutateFailureSnafu {
failure: mutate.failure,
}
.fail();
}
Output::AffectedRows(mutate.success as usize)
}
};
Ok(output)
}
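An illustrative sketch of driving DDL through the reworked `Admin` handle; the `CreateExpr` argument is assumed to be built as in the distributed-exec example earlier in this diff, and error handling is left to the caller.

use api::v1::CreateExpr;
use common_query::Output;

async fn create_and_report(client: Client, expr: CreateExpr) -> Result<Output> {
    // The name is carried as the AdminRequest's name field.
    let admin = Admin::new("create table", client);
    // `create` wraps the expression into an AdminExpr and goes through `do_request`.
    let result = admin.create(expr).await?;
    // Fold the mutation result into the generic Output, as defined above.
    admin_result_to_output(result)
}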


@@ -1,22 +1,96 @@
use api::v1::{greptime_client::GreptimeClient, *};
use snafu::{OptionExt, ResultExt};
use std::sync::Arc;
use api::v1::greptime_client::GreptimeClient;
use api::v1::*;
use common_grpc::channel_manager::ChannelManager;
use parking_lot::RwLock;
use snafu::OptionExt;
use snafu::ResultExt;
use tonic::transport::Channel;
use crate::error;
use crate::load_balance::LoadBalance;
use crate::load_balance::Loadbalancer;
use crate::Result;
#[derive(Clone, Debug)]
#[derive(Clone, Debug, Default)]
pub struct Client {
client: GreptimeClient<Channel>,
inner: Arc<Inner>,
}
#[derive(Debug, Default)]
struct Inner {
channel_manager: ChannelManager,
peers: Arc<RwLock<Vec<String>>>,
load_balance: Loadbalancer,
}
impl Inner {
fn with_manager(channel_manager: ChannelManager) -> Self {
Self {
channel_manager,
..Default::default()
}
}
fn set_peers(&self, peers: Vec<String>) {
let mut guard = self.peers.write();
*guard = peers;
}
fn get_peer(&self) -> Option<String> {
let guard = self.peers.read();
self.load_balance.get_peer(&guard).cloned()
}
}
impl Client {
pub async fn connect(url: impl Into<String>) -> Result<Self> {
let url = url.into();
let client = GreptimeClient::connect(url.clone())
.await
.context(error::ConnectFailedSnafu { url })?;
Ok(Self { client })
pub fn new() -> Self {
Default::default()
}
pub fn with_manager(channel_manager: ChannelManager) -> Self {
let inner = Arc::new(Inner::with_manager(channel_manager));
Self { inner }
}
pub fn with_urls<U, A>(urls: A) -> Self
where
U: AsRef<str>,
A: AsRef<[U]>,
{
Self::with_manager_and_urls(ChannelManager::new(), urls)
}
pub fn with_manager_and_urls<U, A>(channel_manager: ChannelManager, urls: A) -> Self
where
U: AsRef<str>,
A: AsRef<[U]>,
{
let inner = Inner::with_manager(channel_manager);
let urls: Vec<String> = urls
.as_ref()
.iter()
.map(|peer| peer.as_ref().to_string())
.collect();
inner.set_peers(urls);
Self {
inner: Arc::new(inner),
}
}
pub fn start<U, A>(&self, urls: A)
where
U: AsRef<str>,
A: AsRef<[U]>,
{
let urls: Vec<String> = urls
.as_ref()
.iter()
.map(|peer| peer.as_ref().to_string())
.collect();
self.inner.set_peers(urls);
}
pub async fn admin(&self, req: AdminRequest) -> Result<AdminResponse> {
@@ -48,12 +122,59 @@ impl Client {
}
pub async fn batch(&self, req: BatchRequest) -> Result<BatchResponse> {
let res = self
.client
.clone()
.batch(req)
.await
.context(error::TonicStatusSnafu)?;
Ok(res.into_inner())
let peer = self
.inner
.get_peer()
.context(error::IllegalGrpcClientStateSnafu {
err_msg: "No available peer found",
})?;
let mut client = self.make_client(peer)?;
let result = client.batch(req).await.context(error::TonicStatusSnafu)?;
Ok(result.into_inner())
}
fn make_client(&self, addr: impl AsRef<str>) -> Result<GreptimeClient<Channel>> {
let addr = addr.as_ref();
let channel = self
.inner
.channel_manager
.get(addr)
.context(error::CreateChannelSnafu { addr })?;
Ok(GreptimeClient::new(channel))
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use super::Inner;
use crate::load_balance::Loadbalancer;
fn mock_peers() -> Vec<String> {
vec![
"127.0.0.1:3001".to_string(),
"127.0.0.1:3002".to_string(),
"127.0.0.1:3003".to_string(),
]
}
#[test]
fn test_inner() {
let inner = Inner::default();
assert!(matches!(
inner.load_balance,
Loadbalancer::Random(crate::load_balance::Random)
));
assert!(inner.get_peer().is_none());
let peers = mock_peers();
inner.set_peers(peers.clone());
let all: HashSet<String> = peers.into_iter().collect();
for _ in 0..20 {
assert!(all.contains(&inner.get_peer().unwrap()));
}
}
}
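A short, illustrative sketch of constructing the new multi-peer client; it uses only the constructors visible in this diff.

fn build_client() -> Client {
    // Every request picks one of these peers through the configured load balancer.
    let client = Client::with_urls(vec!["127.0.0.1:3001", "127.0.0.1:3002"]);
    // Peers can be swapped at runtime without rebuilding the handle.
    client.start(vec!["127.0.0.1:3003", "127.0.0.1:3004"]);
    client
}

The same cheap, Arc-backed handle can then be shared, for example by `Database::new("greptime", client.clone())` and `Admin::new("admin", client)`.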


@@ -1,28 +1,34 @@
use std::ops::Deref;
use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
use api::v1::codec::SelectResult as GrpcSelectResult;
use api::v1::{
object_expr, object_result, select_expr, DatabaseRequest, ExprHeader, InsertExpr,
MutateResult as GrpcMutateResult, ObjectExpr, ObjectResult as GrpcObjectResult, PhysicalPlan,
SelectExpr,
column::Values, object_expr, object_result, select_expr, Column, ColumnDataType,
DatabaseRequest, ExprHeader, InsertExpr, MutateResult as GrpcMutateResult, ObjectExpr,
ObjectResult as GrpcObjectResult, PhysicalPlan, SelectExpr,
};
use common_base::BitVec;
use common_error::status_code::StatusCode;
use common_grpc::AsExcutionPlan;
use common_grpc::DefaultAsPlanImpl;
use common_query::Output;
use common_recordbatch::{RecordBatch, RecordBatches};
use common_time::date::Date;
use common_time::datetime::DateTime;
use common_time::timestamp::Timestamp;
use datafusion::physical_plan::ExecutionPlan;
use datatypes::prelude::*;
use datatypes::schema::{ColumnSchema, Schema};
use snafu::{ensure, OptionExt, ResultExt};
use crate::error::{self, MissingResultSnafu};
use crate::error;
use crate::{
error::DatanodeSnafu, error::DecodeSelectSnafu, error::EncodePhysicalSnafu,
error::MissingHeaderSnafu, Client, Result,
error::{ConvertSchemaSnafu, DatanodeSnafu, DecodeSelectSnafu, EncodePhysicalSnafu},
Client, Result,
};
pub const PROTOCOL_VERSION: u32 = 1;
pub type Bytes = Vec<u8>;
#[derive(Clone, Debug)]
pub struct Database {
name: String,
@@ -41,22 +47,33 @@ impl Database {
&self.name
}
pub async fn insert(&self, table: impl Into<String>, values: Vec<Bytes>) -> Result<()> {
pub async fn insert(&self, insert: InsertExpr) -> Result<ObjectResult> {
let header = ExprHeader {
version: PROTOCOL_VERSION,
};
let insert = InsertExpr {
table_name: table.into(),
values,
};
let expr = ObjectExpr {
header: Some(header),
expr: Some(object_expr::Expr::Insert(insert)),
};
self.object(expr).await?.try_into()
}
self.object(expr).await?;
Ok(())
pub async fn batch_insert(&self, insert_exprs: Vec<InsertExpr>) -> Result<Vec<ObjectResult>> {
let header = ExprHeader {
version: PROTOCOL_VERSION,
};
let obj_exprs = insert_exprs
.into_iter()
.map(|expr| ObjectExpr {
header: Some(header.clone()),
expr: Some(object_expr::Expr::Insert(expr)),
})
.collect();
self.objects(obj_exprs)
.await?
.into_iter()
.map(|result| result.try_into())
.collect()
}
pub async fn select(&self, expr: Select) -> Result<ObjectResult> {
@@ -86,6 +103,13 @@ impl Database {
self.do_select(select_expr).await
}
pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<ObjectResult> {
let select_expr = SelectExpr {
expr: Some(select_expr::Expr::LogicalPlan(logical_plan)),
};
self.do_select(select_expr).await
}
async fn do_select(&self, select_expr: SelectExpr) -> Result<ObjectResult> {
let header = ExprHeader {
version: PROTOCOL_VERSION,
@@ -97,41 +121,12 @@ impl Database {
};
let obj_result = self.object(expr).await?;
let header = obj_result.header.context(MissingHeaderSnafu)?;
if !StatusCode::is_success(header.code) {
return DatanodeSnafu {
code: header.code,
msg: header.err_msg,
}
.fail();
}
let obj_result = obj_result.result.context(MissingResultSnafu {
name: "select_result".to_string(),
expected: 1_usize,
actual: 0_usize,
})?;
let result = match obj_result {
object_result::Result::Select(select) => {
let result = select
.raw_data
.deref()
.try_into()
.context(DecodeSelectSnafu)?;
ObjectResult::Select(result)
}
object_result::Result::Mutate(mutate) => ObjectResult::Mutate(mutate),
};
Ok(result)
obj_result.try_into()
}
// TODO(jiachun) update/delete
async fn object(&self, expr: ObjectExpr) -> Result<GrpcObjectResult> {
pub async fn object(&self, expr: ObjectExpr) -> Result<GrpcObjectResult> {
let res = self.objects(vec![expr]).await?.pop().unwrap();
Ok(res)
}
@@ -165,6 +160,269 @@ pub enum ObjectResult {
Mutate(GrpcMutateResult),
}
impl TryFrom<api::v1::ObjectResult> for ObjectResult {
type Error = error::Error;
fn try_from(object_result: api::v1::ObjectResult) -> std::result::Result<Self, Self::Error> {
let header = object_result.header.context(error::MissingHeaderSnafu)?;
if !StatusCode::is_success(header.code) {
return DatanodeSnafu {
code: header.code,
msg: header.err_msg,
}
.fail();
}
let obj_result = object_result.result.context(error::MissingResultSnafu {
name: "result".to_string(),
expected: 1_usize,
actual: 0_usize,
})?;
Ok(match obj_result {
object_result::Result::Select(select) => {
let result = (*select.raw_data).try_into().context(DecodeSelectSnafu)?;
ObjectResult::Select(result)
}
object_result::Result::Mutate(mutate) => ObjectResult::Mutate(mutate),
})
}
}
pub enum Select {
Sql(String),
}
impl TryFrom<ObjectResult> for Output {
type Error = error::Error;
fn try_from(value: ObjectResult) -> Result<Self> {
let output = match value {
ObjectResult::Select(select) => {
let vectors = select
.columns
.iter()
.map(|column| column_to_vector(column, select.row_count))
.collect::<Result<Vec<VectorRef>>>()?;
let column_schemas = select
.columns
.iter()
.zip(vectors.iter())
.map(|(column, vector)| {
let datatype = vector.data_type();
// nullability does not affect the output, so every column is marked nullable
ColumnSchema::new(&column.column_name, datatype, true)
})
.collect::<Vec<ColumnSchema>>();
let schema = Arc::new(Schema::try_new(column_schemas).context(ConvertSchemaSnafu)?);
let recordbatches = if vectors.is_empty() {
RecordBatches::try_new(schema, vec![])
} else {
RecordBatch::new(schema, vectors)
.and_then(|batch| RecordBatches::try_new(batch.schema.clone(), vec![batch]))
}
.context(error::CreateRecordBatchesSnafu)?;
Output::RecordBatches(recordbatches)
}
ObjectResult::Mutate(mutate) => {
if mutate.failure != 0 {
return error::MutateFailureSnafu {
failure: mutate.failure,
}
.fail();
}
Output::AffectedRows(mutate.success as usize)
}
};
Ok(output)
}
}
fn column_to_vector(column: &Column, rows: u32) -> Result<VectorRef> {
let wrapper =
ColumnDataTypeWrapper::try_new(column.datatype).context(error::ColumnDataTypeSnafu)?;
let column_datatype = wrapper.datatype();
let rows = rows as usize;
let mut vector = VectorBuilder::with_capacity(wrapper.into(), rows);
if let Some(values) = &column.values {
let values = collect_column_values(column_datatype, values);
let mut values_iter = values.into_iter();
let null_mask = BitVec::from_slice(&column.null_mask);
let mut nulls_iter = null_mask.iter().by_vals().fuse();
for i in 0..rows {
if let Some(true) = nulls_iter.next() {
vector.push_null();
} else {
let value_ref = values_iter.next().context(error::InvalidColumnProtoSnafu {
err_msg: format!(
"value not found at position {} of column {}",
i, &column.column_name
),
})?;
vector
.try_push_ref(value_ref)
.context(error::CreateVectorSnafu)?;
}
}
} else {
(0..rows).for_each(|_| vector.push_null());
}
Ok(vector.finish())
}
fn collect_column_values(column_datatype: ColumnDataType, values: &Values) -> Vec<ValueRef> {
macro_rules! collect_values {
($value: expr, $mapper: expr) => {
$value.iter().map($mapper).collect::<Vec<ValueRef>>()
};
}
match column_datatype {
ColumnDataType::Boolean => collect_values!(values.bool_values, |v| ValueRef::from(*v)),
ColumnDataType::Int8 => collect_values!(values.i8_values, |v| ValueRef::from(*v as i8)),
ColumnDataType::Int16 => {
collect_values!(values.i16_values, |v| ValueRef::from(*v as i16))
}
ColumnDataType::Int32 => {
collect_values!(values.i32_values, |v| ValueRef::from(*v))
}
ColumnDataType::Int64 => {
collect_values!(values.i64_values, |v| ValueRef::from(*v as i64))
}
ColumnDataType::Uint8 => {
collect_values!(values.u8_values, |v| ValueRef::from(*v as u8))
}
ColumnDataType::Uint16 => {
collect_values!(values.u16_values, |v| ValueRef::from(*v as u16))
}
ColumnDataType::Uint32 => {
collect_values!(values.u32_values, |v| ValueRef::from(*v))
}
ColumnDataType::Uint64 => {
collect_values!(values.u64_values, |v| ValueRef::from(*v as u64))
}
ColumnDataType::Float32 => collect_values!(values.f32_values, |v| ValueRef::from(*v)),
ColumnDataType::Float64 => collect_values!(values.f64_values, |v| ValueRef::from(*v)),
ColumnDataType::Binary => {
collect_values!(values.binary_values, |v| ValueRef::from(v.as_slice()))
}
ColumnDataType::String => {
collect_values!(values.string_values, |v| ValueRef::from(v.as_str()))
}
ColumnDataType::Date => {
collect_values!(values.date_values, |v| ValueRef::Date(Date::new(*v)))
}
ColumnDataType::Datetime => {
collect_values!(values.datetime_values, |v| ValueRef::DateTime(
DateTime::new(*v)
))
}
ColumnDataType::Timestamp => {
collect_values!(values.ts_millis_values, |v| ValueRef::Timestamp(
Timestamp::from_millis(*v)
))
}
}
}
#[cfg(test)]
mod tests {
use datanode::server::grpc::select::{null_mask, values};
use datatypes::vectors::{
BinaryVector, BooleanVector, DateTimeVector, DateVector, Float32Vector, Float64Vector,
Int16Vector, Int32Vector, Int64Vector, Int8Vector, StringVector, UInt16Vector,
UInt32Vector, UInt64Vector, UInt8Vector,
};
use super::*;
#[test]
fn test_column_to_vector() {
let mut column = create_test_column(Arc::new(BooleanVector::from(vec![true])));
column.datatype = -100;
let result = column_to_vector(&column, 1);
assert!(result.is_err());
assert_eq!(
result.unwrap_err().to_string(),
"Column datatype error, source: Unknown proto column datatype: -100"
);
macro_rules! test_with_vector {
($vector: expr) => {
let vector = Arc::new($vector);
let column = create_test_column(vector.clone());
let result = column_to_vector(&column, vector.len() as u32).unwrap();
assert_eq!(result, vector as VectorRef);
};
}
test_with_vector!(BooleanVector::from(vec![Some(true), None, Some(false)]));
test_with_vector!(Int8Vector::from(vec![Some(i8::MIN), None, Some(i8::MAX)]));
test_with_vector!(Int16Vector::from(vec![
Some(i16::MIN),
None,
Some(i16::MAX)
]));
test_with_vector!(Int32Vector::from(vec![
Some(i32::MIN),
None,
Some(i32::MAX)
]));
test_with_vector!(Int64Vector::from(vec![
Some(i64::MIN),
None,
Some(i64::MAX)
]));
test_with_vector!(UInt8Vector::from(vec![Some(u8::MIN), None, Some(u8::MAX)]));
test_with_vector!(UInt16Vector::from(vec![
Some(u16::MIN),
None,
Some(u16::MAX)
]));
test_with_vector!(UInt32Vector::from(vec![
Some(u32::MIN),
None,
Some(u32::MAX)
]));
test_with_vector!(UInt64Vector::from(vec![
Some(u64::MIN),
None,
Some(u64::MAX)
]));
test_with_vector!(Float32Vector::from(vec![
Some(f32::MIN),
None,
Some(f32::MAX)
]));
test_with_vector!(Float64Vector::from(vec![
Some(f64::MIN),
None,
Some(f64::MAX)
]));
test_with_vector!(BinaryVector::from(vec![
Some(b"".to_vec()),
None,
Some(b"hello".to_vec())
]));
test_with_vector!(StringVector::from(vec![Some(""), None, Some("foo"),]));
test_with_vector!(DateVector::from(vec![Some(1), None, Some(3)]));
test_with_vector!(DateTimeVector::from(vec![Some(4), None, Some(6)]));
}
fn create_test_column(vector: VectorRef) -> Column {
let wrapper: ColumnDataTypeWrapper = vector.data_type().try_into().unwrap();
let array = vector.to_arrow_array();
Column {
column_name: "test".to_string(),
semantic_type: 1,
values: Some(values(&[array.clone()]).unwrap()),
null_mask: null_mask(&vec![array], vector.len()),
datatype: wrapper.datatype() as i32,
}
}
}
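For illustration, the call shape of the reworked `Database::insert`; the encoded batches are assumed to come from a helper like `insert_batches` in the stream-insert example earlier in this diff.

use std::collections::HashMap;

use api::v1::{insert_expr, InsertExpr};

async fn insert_demo(db: &Database, encoded_batches: Vec<Vec<u8>>) -> Result<ObjectResult> {
    let expr = InsertExpr {
        table_name: "demo".to_string(),
        // `values` carries encoded InsertBatch payloads; `options` is free-form metadata.
        expr: Some(insert_expr::Expr::Values(insert_expr::Values {
            values: encoded_batches,
        })),
        options: HashMap::default(),
    };
    // `insert` now returns the parsed ObjectResult instead of `()`.
    db.insert(expr).await
}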


@@ -1,3 +1,4 @@
use std::any::Any;
use std::sync::Arc;
use api::serde::DecodeError;
@@ -42,6 +43,91 @@ pub enum Error {
#[snafu(backtrace)]
source: common_grpc::Error,
},
#[snafu(display("Mutate result has failure {}", failure))]
MutateFailure { failure: u32, backtrace: Backtrace },
#[snafu(display("Invalid column proto: {}", err_msg))]
InvalidColumnProto {
err_msg: String,
backtrace: Backtrace,
},
#[snafu(display("Column datatype error, source: {}", source))]
ColumnDataType {
#[snafu(backtrace)]
source: api::error::Error,
},
#[snafu(display("Failed to create vector, source: {}", source))]
CreateVector {
#[snafu(backtrace)]
source: datatypes::error::Error,
},
#[snafu(display("Failed to create RecordBatches, source: {}", source))]
CreateRecordBatches {
#[snafu(backtrace)]
source: common_recordbatch::error::Error,
},
#[snafu(display("Illegal GRPC client state: {}", err_msg))]
IllegalGrpcClientState {
err_msg: String,
backtrace: Backtrace,
},
#[snafu(display("Missing required field in protobuf, field: {}", field))]
MissingField { field: String, backtrace: Backtrace },
#[snafu(display("Failed to convert schema, source: {}", source))]
ConvertSchema {
#[snafu(backtrace)]
source: datatypes::error::Error,
},
#[snafu(display(
"Failed to create gRPC channel, peer address: {}, source: {}",
addr,
source
))]
CreateChannel {
addr: String,
#[snafu(backtrace)]
source: common_grpc::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::ConnectFailed { .. }
| Error::MissingResult { .. }
| Error::MissingHeader { .. }
| Error::TonicStatus { .. }
| Error::DecodeSelect { .. }
| Error::Datanode { .. }
| Error::EncodePhysical { .. }
| Error::MutateFailure { .. }
| Error::InvalidColumnProto { .. }
| Error::ColumnDataType { .. }
| Error::MissingField { .. } => StatusCode::Internal,
Error::ConvertSchema { source } | Error::CreateVector { source } => {
source.status_code()
}
Error::CreateRecordBatches { source } => source.status_code(),
Error::CreateChannel { source, .. } => source.status_code(),
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
ErrorCompat::backtrace(self)
}
fn as_any(&self) -> &dyn Any {
self
}
}


@@ -1,6 +1,8 @@
pub mod admin;
mod client;
mod database;
mod error;
pub mod load_balance;
pub use self::{
client::Client,


@@ -0,0 +1,52 @@
use enum_dispatch::enum_dispatch;
use rand::seq::SliceRandom;
#[enum_dispatch]
pub trait LoadBalance {
fn get_peer<'a>(&self, peers: &'a [String]) -> Option<&'a String>;
}
#[enum_dispatch(LoadBalance)]
#[derive(Debug)]
pub enum Loadbalancer {
Random,
}
impl Default for Loadbalancer {
fn default() -> Self {
Loadbalancer::from(Random)
}
}
#[derive(Debug)]
pub struct Random;
impl LoadBalance for Random {
fn get_peer<'a>(&self, peers: &'a [String]) -> Option<&'a String> {
peers.choose(&mut rand::thread_rng())
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use super::{LoadBalance, Random};
#[test]
fn test_random_lb() {
let peers = vec![
"127.0.0.1:3001".to_string(),
"127.0.0.1:3002".to_string(),
"127.0.0.1:3003".to_string(),
"127.0.0.1:3004".to_string(),
];
let all: HashSet<String> = peers.clone().into_iter().collect();
let random = Random;
for _ in 0..100 {
let peer = random.get_peer(&peers).unwrap();
assert!(all.contains(peer));
}
}
}
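A tiny illustrative sketch of the enum-dispatched balancer on its own; `LoadBalance` must be in scope for the call to resolve.

fn pick_one(peers: &[String]) -> Option<&String> {
    // The default currently dispatches to the Random strategy.
    let lb = Loadbalancer::default();
    // Returns None only when `peers` is empty.
    lb.get_peer(peers)
}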


@@ -10,8 +10,11 @@ path = "src/bin/greptime.rs"
[dependencies]
clap = { version = "3.1", features = ["derive"] }
common-error = { path = "../common/error" }
common-telemetry = { path = "../common/telemetry", features = ["deadlock_detection"]}
common-telemetry = { path = "../common/telemetry", features = ["deadlock_detection"] }
datanode = { path = "../datanode" }
frontend = { path = "../frontend" }
futures = "0.3"
meta-srv = { path = "../meta-srv" }
snafu = { version = "0.7", features = ["backtraces"] }
tokio = { version = "1.18", features = ["full"] }
toml = "0.5"


@@ -3,7 +3,10 @@ use std::fmt;
use clap::Parser;
use cmd::datanode;
use cmd::error::Result;
use common_telemetry::{self, logging::error, logging::info};
use cmd::frontend;
use cmd::metasrv;
use common_telemetry::logging::error;
use common_telemetry::logging::info;
#[derive(Parser)]
#[clap(name = "greptimedb")]
@@ -26,12 +29,18 @@ impl Command {
enum SubCommand {
#[clap(name = "datanode")]
Datanode(datanode::Command),
#[clap(name = "frontend")]
Frontend(frontend::Command),
#[clap(name = "metasrv")]
Metasrv(metasrv::Command),
}
impl SubCommand {
async fn run(self) -> Result<()> {
match self {
SubCommand::Datanode(cmd) => cmd.run().await,
SubCommand::Frontend(cmd) => cmd.run().await,
SubCommand::Metasrv(cmd) => cmd.run().await,
}
}
}
@@ -40,6 +49,8 @@ impl fmt::Display for SubCommand {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
SubCommand::Datanode(..) => write!(f, "greptime-datanode"),
SubCommand::Frontend(..) => write!(f, "greptime-frontend"),
SubCommand::Metasrv(..) => write!(f, "greptime-metasrv"),
}
}
}


@@ -39,6 +39,8 @@ struct StartCommand {
rpc_addr: Option<String>,
#[clap(long)]
mysql_addr: Option<String>,
#[clap(long)]
postgres_addr: Option<String>,
#[clap(short, long)]
config_file: Option<String>,
}
@@ -78,6 +80,9 @@ impl TryFrom<StartCommand> for DatanodeOptions {
if let Some(addr) = cmd.mysql_addr {
opts.mysql_addr = addr;
}
if let Some(addr) = cmd.postgres_addr {
opts.postgres_addr = addr;
}
Ok(opts)
}
@@ -95,6 +100,7 @@ mod tests {
http_addr: None,
rpc_addr: None,
mysql_addr: None,
postgres_addr: None,
config_file: Some(format!(
"{}/../../config/datanode.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
@@ -106,6 +112,10 @@ mod tests {
assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal_dir);
assert_eq!("0.0.0.0:3306".to_string(), options.mysql_addr);
assert_eq!(4, options.mysql_runtime_size);
assert_eq!("0.0.0.0:5432".to_string(), options.postgres_addr);
assert_eq!(4, options.postgres_runtime_size);
match options.storage {
ObjectStoreConfig::File { data_dir } => {
assert_eq!("/tmp/greptimedb/data/".to_string(), data_dir)


@@ -11,14 +11,30 @@ pub enum Error {
source: datanode::error::Error,
},
#[snafu(display("Failed to start frontend, source: {}", source))]
StartFrontend {
#[snafu(backtrace)]
source: frontend::error::Error,
},
#[snafu(display("Failed to start meta server, source: {}", source))]
StartMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},
#[snafu(display("Failed to read config file: {}, source: {}", path, source))]
ReadConfig {
source: std::io::Error,
path: String,
source: std::io::Error,
backtrace: Backtrace,
},
#[snafu(display("Failed to parse config, source: {}", source))]
ParseConfig { source: toml::de::Error },
ParseConfig {
source: toml::de::Error,
backtrace: Backtrace,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -27,6 +43,8 @@ impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::StartDatanode { source } => source.status_code(),
Error::StartFrontend { source } => source.status_code(),
Error::StartMetaServer { source } => source.status_code(),
Error::ReadConfig { .. } | Error::ParseConfig { .. } => StatusCode::InvalidArguments,
}
}
@@ -44,18 +62,68 @@ impl ErrorExt for Error {
mod tests {
use super::*;
fn raise_read_config_error() -> std::result::Result<(), std::io::Error> {
Err(std::io::ErrorKind::NotFound.into())
type StdResult<E> = std::result::Result<(), E>;
#[test]
fn test_start_node_error() {
fn throw_datanode_error() -> StdResult<datanode::error::Error> {
datanode::error::MissingFieldSnafu {
field: "test_field",
}
.fail()
}
let e = throw_datanode_error()
.context(StartDatanodeSnafu)
.err()
.unwrap();
assert!(e.backtrace_opt().is_some());
assert_eq!(e.status_code(), StatusCode::InvalidArguments);
}
#[test]
fn test_error() {
let e = raise_read_config_error()
fn test_start_frontend_error() {
fn throw_frontend_error() -> StdResult<frontend::error::Error> {
frontend::error::InvalidSqlSnafu { err_msg: "failed" }.fail()
}
let e = throw_frontend_error()
.context(StartFrontendSnafu)
.err()
.unwrap();
assert!(e.backtrace_opt().is_some());
assert_eq!(e.status_code(), StatusCode::InvalidArguments);
}
#[test]
fn test_start_metasrv_error() {
fn throw_metasrv_error() -> StdResult<meta_srv::error::Error> {
meta_srv::error::StreamNoneSnafu {}.fail()
}
let e = throw_metasrv_error()
.context(StartMetaServerSnafu)
.err()
.unwrap();
assert!(e.backtrace_opt().is_some());
assert_eq!(e.status_code(), StatusCode::Internal);
}
#[test]
fn test_read_config_error() {
fn throw_read_config_error() -> StdResult<std::io::Error> {
Err(std::io::ErrorKind::NotFound.into())
}
let e = throw_read_config_error()
.context(ReadConfigSnafu { path: "test" })
.err()
.unwrap();
assert!(e.backtrace_opt().is_none());
assert!(e.backtrace_opt().is_some());
assert_eq!(e.status_code(), StatusCode::InvalidArguments);
}
}

src/cmd/src/frontend.rs (new file, 157 lines)

@@ -0,0 +1,157 @@
use clap::Parser;
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::grpc::GrpcOptions;
use frontend::influxdb::InfluxdbOptions;
use frontend::instance::Instance;
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use snafu::ResultExt;
use crate::error::{self, Result};
use crate::toml_loader;
#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
subcmd: SubCommand,
}
impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
}
}
#[derive(Parser)]
enum SubCommand {
Start(StartCommand),
}
impl SubCommand {
async fn run(self) -> Result<()> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
}
}
}
#[derive(Debug, Parser)]
struct StartCommand {
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
grpc_addr: Option<String>,
#[clap(long)]
mysql_addr: Option<String>,
#[clap(long)]
postgres_addr: Option<String>,
#[clap(long)]
opentsdb_addr: Option<String>,
#[clap(short, long)]
config_file: Option<String>,
#[clap(short, long)]
influxdb_enable: Option<bool>,
}
impl StartCommand {
async fn run(self) -> Result<()> {
let opts = self.try_into()?;
let mut frontend = Frontend::new(opts, Instance::new());
frontend.start().await.context(error::StartFrontendSnafu)
}
}
impl TryFrom<StartCommand> for FrontendOptions {
type Error = error::Error;
fn try_from(cmd: StartCommand) -> Result<Self> {
let mut opts: FrontendOptions = if let Some(path) = cmd.config_file {
toml_loader::from_file!(&path)?
} else {
FrontendOptions::default()
};
if let Some(addr) = cmd.http_addr {
opts.http_addr = Some(addr);
}
if let Some(addr) = cmd.grpc_addr {
opts.grpc_options = Some(GrpcOptions {
addr,
..Default::default()
});
}
if let Some(addr) = cmd.mysql_addr {
opts.mysql_options = Some(MysqlOptions {
addr,
..Default::default()
});
}
if let Some(addr) = cmd.postgres_addr {
opts.postgres_options = Some(PostgresOptions {
addr,
..Default::default()
});
}
if let Some(addr) = cmd.opentsdb_addr {
opts.opentsdb_options = Some(OpentsdbOptions {
addr,
..Default::default()
});
}
if let Some(enable) = cmd.influxdb_enable {
opts.influxdb_options = Some(InfluxdbOptions { enable });
}
Ok(opts)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_try_from_start_command() {
let command = StartCommand {
http_addr: Some("127.0.0.1:1234".to_string()),
grpc_addr: None,
mysql_addr: Some("127.0.0.1:5678".to_string()),
postgres_addr: Some("127.0.0.1:5432".to_string()),
opentsdb_addr: Some("127.0.0.1:4321".to_string()),
influxdb_enable: Some(false),
config_file: None,
};
let opts: FrontendOptions = command.try_into().unwrap();
assert_eq!(opts.http_addr, Some("127.0.0.1:1234".to_string()));
assert_eq!(opts.mysql_options.as_ref().unwrap().addr, "127.0.0.1:5678");
assert_eq!(
opts.postgres_options.as_ref().unwrap().addr,
"127.0.0.1:5432"
);
assert_eq!(
opts.opentsdb_options.as_ref().unwrap().addr,
"127.0.0.1:4321"
);
let default_opts = FrontendOptions::default();
assert_eq!(
opts.grpc_options.unwrap().addr,
default_opts.grpc_options.unwrap().addr
);
assert_eq!(
opts.mysql_options.as_ref().unwrap().runtime_size,
default_opts.mysql_options.as_ref().unwrap().runtime_size
);
assert_eq!(
opts.postgres_options.as_ref().unwrap().runtime_size,
default_opts.postgres_options.as_ref().unwrap().runtime_size
);
assert_eq!(
opts.opentsdb_options.as_ref().unwrap().runtime_size,
default_opts.opentsdb_options.as_ref().unwrap().runtime_size
);
assert!(!opts.influxdb_options.unwrap().enable);
}
}


@@ -1,3 +1,5 @@
pub mod datanode;
pub mod error;
pub mod frontend;
pub mod metasrv;
mod toml_loader;

src/cmd/src/metasrv.rs (new file, 122 lines)

@@ -0,0 +1,122 @@
use clap::Parser;
use common_telemetry::logging;
use meta_srv::bootstrap;
use meta_srv::metasrv::MetaSrvOptions;
use snafu::ResultExt;
use crate::error;
use crate::error::Error;
use crate::error::Result;
use crate::toml_loader;
#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
subcmd: SubCommand,
}
impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
}
}
#[derive(Parser)]
enum SubCommand {
Start(StartCommand),
}
impl SubCommand {
async fn run(self) -> Result<()> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
}
}
}
#[derive(Debug, Parser)]
struct StartCommand {
#[clap(long)]
bind_addr: Option<String>,
#[clap(long)]
server_addr: Option<String>,
#[clap(long)]
store_addr: Option<String>,
#[clap(short, long)]
config_file: Option<String>,
}
impl StartCommand {
async fn run(self) -> Result<()> {
logging::info!("MetaSrv start command: {:#?}", self);
let opts: MetaSrvOptions = self.try_into()?;
logging::info!("MetaSrv options: {:#?}", opts);
bootstrap::bootstrap_meta_srv(opts)
.await
.context(error::StartMetaServerSnafu)
}
}
impl TryFrom<StartCommand> for MetaSrvOptions {
type Error = Error;
fn try_from(cmd: StartCommand) -> Result<Self> {
let mut opts: MetaSrvOptions = if let Some(path) = cmd.config_file {
toml_loader::from_file!(&path)?
} else {
MetaSrvOptions::default()
};
if let Some(addr) = cmd.bind_addr {
opts.bind_addr = addr;
}
if let Some(addr) = cmd.server_addr {
opts.server_addr = addr;
}
if let Some(addr) = cmd.store_addr {
opts.store_addr = addr;
}
Ok(opts)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_read_from_cmd() {
let cmd = StartCommand {
bind_addr: Some("127.0.0.1:3002".to_string()),
server_addr: Some("0.0.0.0:3002".to_string()),
store_addr: Some("127.0.0.1:2380".to_string()),
config_file: None,
};
let options: MetaSrvOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!("0.0.0.0:3002".to_string(), options.server_addr);
assert_eq!("127.0.0.1:2380".to_string(), options.store_addr);
}
#[test]
fn test_read_from_config_file() {
let cmd = StartCommand {
bind_addr: None,
server_addr: None,
store_addr: None,
config_file: Some(format!(
"{}/../../config/metasrv.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
};
let options: MetaSrvOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!("0.0.0.0:3002".to_string(), options.server_addr);
assert_eq!("127.0.0.1:2380".to_string(), options.store_addr);
assert_eq!(30, options.datanode_lease_secs);
}
}


@@ -0,0 +1,22 @@
[package]
name = "common-catalog"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
async-trait = "0.1"
common-error = { path = "../error" }
common-telemetry = { path = "../telemetry" }
datatypes = { path = "../../datatypes" }
lazy_static = "1.4"
regex = "1.6"
serde = "1.0"
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
table = { path = "../../table" }
[dev-dependencies]
chrono = "0.4"
tempdir = "0.3"
tokio = { version = "1.0", features = ["full"] }


@@ -0,0 +1,17 @@
pub const SYSTEM_CATALOG_NAME: &str = "system";
pub const INFORMATION_SCHEMA_NAME: &str = "information_schema";
pub const SYSTEM_CATALOG_TABLE_NAME: &str = "system_catalog";
pub const DEFAULT_CATALOG_NAME: &str = "greptime";
pub const DEFAULT_SCHEMA_NAME: &str = "public";
/// Reserves [0,MIN_USER_TABLE_ID) for internal usage.
/// User-defined table ids start from this value.
pub const MIN_USER_TABLE_ID: u32 = 1024;
/// system_catalog table id
pub const SYSTEM_CATALOG_TABLE_ID: u32 = 0;
/// scripts table id
pub const SCRIPTS_TABLE_ID: u32 = 1;
pub(crate) const CATALOG_KEY_PREFIX: &str = "__c";
pub(crate) const SCHEMA_KEY_PREFIX: &str = "__s";
pub(crate) const TABLE_KEY_PREFIX: &str = "__t";
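For illustration, a concrete key built on top of these prefixes; the sketch relies on `CatalogKey` and `build_catalog_prefix` from the key module further down, and matches the `__c-greptime-42` expectation in the remote catalog tests.

fn catalog_key_example() {
    let key = CatalogKey {
        catalog_name: "greptime".to_string(),
        node_id: 42,
    }
    .to_string();
    assert_eq!("__c-greptime-42", key);
    // Prefix scans over all catalogs use the same "__c-" prefix.
    assert!(key.starts_with(&build_catalog_prefix()));
}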


@@ -0,0 +1,49 @@
use std::any::Any;
use common_error::ext::ErrorExt;
use common_error::prelude::{Snafu, StatusCode};
use snafu::{Backtrace, ErrorCompat};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Invalid catalog info: {}", key))]
InvalidCatalog { key: String, backtrace: Backtrace },
#[snafu(display("Failed to deserialize catalog entry value: {}", raw))]
DeserializeCatalogEntryValue {
raw: String,
backtrace: Backtrace,
source: serde_json::error::Error,
},
#[snafu(display("Failed to serialize catalog entry value"))]
SerializeCatalogEntryValue {
backtrace: Backtrace,
source: serde_json::error::Error,
},
#[snafu(display("Failed to parse node id: {}", key))]
ParseNodeId { key: String, backtrace: Backtrace },
}
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::InvalidCatalog { .. }
| Error::DeserializeCatalogEntryValue { .. }
| Error::SerializeCatalogEntryValue { .. } => StatusCode::Unexpected,
Error::ParseNodeId { .. } => StatusCode::InvalidArguments,
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
ErrorCompat::backtrace(self)
}
fn as_any(&self) -> &dyn Any {
self
}
}
pub type Result<T> = std::result::Result<T, Error>;


@@ -0,0 +1,293 @@
use std::fmt::{Display, Formatter};
use std::str::FromStr;
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize, Serializer};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::{RawTableMeta, TableId, TableVersion};
use crate::consts::{CATALOG_KEY_PREFIX, SCHEMA_KEY_PREFIX, TABLE_KEY_PREFIX};
use crate::error::{
DeserializeCatalogEntryValueSnafu, Error, InvalidCatalogSnafu, ParseNodeIdSnafu,
SerializeCatalogEntryValueSnafu,
};
lazy_static! {
static ref CATALOG_KEY_PATTERN: Regex =
Regex::new(&format!("^{}-([a-zA-Z_]+)-([0-9]+)$", CATALOG_KEY_PREFIX)).unwrap();
}
lazy_static! {
static ref SCHEMA_KEY_PATTERN: Regex = Regex::new(&format!(
"^{}-([a-zA-Z_]+)-([a-zA-Z_]+)-([0-9]+)$",
SCHEMA_KEY_PREFIX
))
.unwrap();
}
lazy_static! {
static ref TABLE_KEY_PATTERN: Regex = Regex::new(&format!(
"^{}-([a-zA-Z_]+)-([a-zA-Z_]+)-([a-zA-Z_]+)-([0-9]+)-([0-9]+)$",
TABLE_KEY_PREFIX
))
.unwrap();
}
pub fn build_catalog_prefix() -> String {
format!("{}-", CATALOG_KEY_PREFIX)
}
pub fn build_schema_prefix(catalog_name: impl AsRef<str>) -> String {
format!("{}-{}-", SCHEMA_KEY_PREFIX, catalog_name.as_ref())
}
pub fn build_table_prefix(catalog_name: impl AsRef<str>, schema_name: impl AsRef<str>) -> String {
format!(
"{}-{}-{}-",
TABLE_KEY_PREFIX,
catalog_name.as_ref(),
schema_name.as_ref()
)
}
pub struct TableKey {
pub catalog_name: String,
pub schema_name: String,
pub table_name: String,
pub version: TableVersion,
pub node_id: u64,
}
impl Display for TableKey {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(TABLE_KEY_PREFIX)?;
f.write_str("-")?;
f.write_str(&self.catalog_name)?;
f.write_str("-")?;
f.write_str(&self.schema_name)?;
f.write_str("-")?;
f.write_str(&self.table_name)?;
f.write_str("-")?;
f.serialize_u64(self.version)?;
f.write_str("-")?;
f.serialize_u64(self.node_id)
}
}
impl TableKey {
pub fn parse<S: AsRef<str>>(s: S) -> Result<Self, Error> {
let key = s.as_ref();
let captures = TABLE_KEY_PATTERN
.captures(key)
.context(InvalidCatalogSnafu { key })?;
ensure!(captures.len() == 6, InvalidCatalogSnafu { key });
let version =
u64::from_str(&captures[4]).map_err(|_| InvalidCatalogSnafu { key }.build())?;
let node_id_str = captures[5].to_string();
let node_id = u64::from_str(&node_id_str)
.map_err(|_| ParseNodeIdSnafu { key: node_id_str }.build())?;
Ok(Self {
catalog_name: captures[1].to_string(),
schema_name: captures[2].to_string(),
table_name: captures[3].to_string(),
version,
node_id,
})
}
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct TableValue {
pub id: TableId,
pub node_id: u64,
pub regions_ids: Vec<u64>,
pub meta: RawTableMeta,
}
impl TableValue {
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
serde_json::from_str(s.as_ref())
.context(DeserializeCatalogEntryValueSnafu { raw: s.as_ref() })
}
pub fn as_bytes(&self) -> Result<Vec<u8>, Error> {
Ok(serde_json::to_string(self)
.context(SerializeCatalogEntryValueSnafu)?
.into_bytes())
}
}
pub struct CatalogKey {
pub catalog_name: String,
pub node_id: u64,
}
impl Display for CatalogKey {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(CATALOG_KEY_PREFIX)?;
f.write_str("-")?;
f.write_str(&self.catalog_name)?;
f.write_str("-")?;
f.serialize_u64(self.node_id)
}
}
impl CatalogKey {
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
let key = s.as_ref();
let captures = CATALOG_KEY_PATTERN
.captures(key)
.context(InvalidCatalogSnafu { key })?;
ensure!(captures.len() == 3, InvalidCatalogSnafu { key });
let node_id_str = captures[2].to_string();
let node_id = u64::from_str(&node_id_str)
.map_err(|_| ParseNodeIdSnafu { key: node_id_str }.build())?;
Ok(Self {
catalog_name: captures[1].to_string(),
node_id,
})
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct CatalogValue;
impl CatalogValue {
pub fn to_bytes(&self) -> Result<Vec<u8>, Error> {
Ok(serde_json::to_string(self)
.context(SerializeCatalogEntryValueSnafu)?
.into_bytes())
}
}
pub struct SchemaKey {
pub catalog_name: String,
pub schema_name: String,
pub node_id: u64,
}
impl Display for SchemaKey {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(SCHEMA_KEY_PREFIX)?;
f.write_str("-")?;
f.write_str(&self.catalog_name)?;
f.write_str("-")?;
f.write_str(&self.schema_name)?;
f.write_str("-")?;
f.serialize_u64(self.node_id)
}
}
impl SchemaKey {
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
let key = s.as_ref();
let captures = SCHEMA_KEY_PATTERN
.captures(key)
.context(InvalidCatalogSnafu { key })?;
ensure!(captures.len() == 4, InvalidCatalogSnafu { key });
let node_id_str = captures[3].to_string();
let node_id = u64::from_str(&node_id_str)
.map_err(|_| ParseNodeIdSnafu { key: node_id_str }.build())?;
Ok(Self {
catalog_name: captures[1].to_string(),
schema_name: captures[2].to_string(),
node_id,
})
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct SchemaValue;
impl SchemaValue {
pub fn to_bytes(&self) -> Result<Vec<u8>, Error> {
Ok(serde_json::to_string(self)
.context(SerializeCatalogEntryValueSnafu)?
.into_bytes())
}
}
#[cfg(test)]
mod tests {
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema, Schema};
use super::*;
#[test]
fn test_parse_catalog_key() {
let key = "__c-C-2";
let catalog_key = CatalogKey::parse(key).unwrap();
assert_eq!("C", catalog_key.catalog_name);
assert_eq!(2, catalog_key.node_id);
assert_eq!(key, catalog_key.to_string());
}
#[test]
fn test_parse_schema_key() {
let key = "__s-C-S-3";
let schema_key = SchemaKey::parse(key).unwrap();
assert_eq!("C", schema_key.catalog_name);
assert_eq!("S", schema_key.schema_name);
assert_eq!(3, schema_key.node_id);
assert_eq!(key, schema_key.to_string());
}
#[test]
fn test_parse_table_key() {
let key = "__t-C-S-T-42-1";
let entry = TableKey::parse(key).unwrap();
assert_eq!("C", entry.catalog_name);
assert_eq!("S", entry.schema_name);
assert_eq!("T", entry.table_name);
assert_eq!(1, entry.node_id);
assert_eq!(42, entry.version);
assert_eq!(key, &entry.to_string());
}
#[test]
fn test_build_prefix() {
assert_eq!("__c-", build_catalog_prefix());
assert_eq!("__s-CATALOG-", build_schema_prefix("CATALOG"));
assert_eq!(
"__t-CATALOG-SCHEMA-",
build_table_prefix("CATALOG", "SCHEMA")
);
}
#[test]
fn test_serialize_schema() {
let schema = Schema::new(vec![ColumnSchema::new(
"name",
ConcreteDataType::string_datatype(),
true,
)]);
let meta = RawTableMeta {
schema: RawSchema::from(&schema),
engine: "mito".to_string(),
created_on: chrono::DateTime::default(),
primary_key_indices: vec![0, 1],
next_column_id: 3,
engine_options: Default::default(),
value_indices: vec![2, 3],
options: Default::default(),
};
let value = TableValue {
id: 42,
node_id: 32,
regions_ids: vec![1, 2, 3],
meta,
};
let serialized = serde_json::to_string(&value).unwrap();
let deserialized = TableValue::parse(&serialized).unwrap();
assert_eq!(value, deserialized);
}
}

View File

@@ -0,0 +1,8 @@
pub mod consts;
pub mod error;
mod helper;
pub use helper::{
build_catalog_prefix, build_schema_prefix, build_table_prefix, CatalogKey, CatalogValue,
SchemaKey, SchemaValue, TableKey, TableValue,
};
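A brief usage sketch of the exported helpers, assuming only this crate and the standard library; the in-memory key list stands in for whatever prefix scan the KV backend provides:
use common_catalog::{build_schema_prefix, SchemaKey};

// List the schema names stored under one catalog by filtering on the key prefix
// and parsing the matching keys.
fn list_schemas(catalog: &str, keys: &[String]) -> Vec<String> {
    let prefix = build_schema_prefix(catalog);
    keys.iter()
        .filter(|k| k.starts_with(&prefix))
        .filter_map(|k| SchemaKey::parse(k).ok())
        .map(|key| key.schema_name)
        .collect()
}

fn main() {
    let keys = vec![
        "__s-greptime-public-0".to_string(),
        "__s-greptime-internal-0".to_string(),
        "__s-other-public-0".to_string(),
    ];
    assert_eq!(vec!["public", "internal"], list_schemas("greptime", &keys));
}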

View File

@@ -2,7 +2,6 @@
name = "common-error"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]

View File

@@ -6,45 +6,47 @@ pub enum StatusCode {
// ====== Begin of common status code ==============
/// Success.
Success = 0,
/// Unknown error.
Unknown = 1,
Unknown = 1000,
/// Unsupported operation.
Unsupported = 2,
Unsupported = 1001,
/// Unexpected error, maybe there is a BUG.
Unexpected = 3,
Unexpected = 1002,
/// Internal server error.
Internal = 4,
Internal = 1003,
/// Invalid arguments.
InvalidArguments = 5,
InvalidArguments = 1004,
// ====== End of common status code ================
// ====== Begin of SQL related status code =========
/// SQL Syntax error.
InvalidSyntax = 6,
InvalidSyntax = 2000,
// ====== End of SQL related status code ===========
// ====== Begin of query related status code =======
/// Fail to create a plan for the query.
PlanQuery = 7,
PlanQuery = 3000,
/// The query engine fail to execute query.
EngineExecuteQuery = 8,
EngineExecuteQuery = 3001,
// ====== End of query related status code =========
// ====== Begin of catalog related status code =====
/// Table already exists.
TableAlreadyExists = 9,
TableNotFound = 10,
TableColumnNotFound = 11,
TableAlreadyExists = 4000,
TableNotFound = 4001,
TableColumnNotFound = 4002,
TableColumnExists = 4003,
// ====== End of catalog related status code =======
// ====== Begin of storage related status code =====
/// Storage is temporarily unable to handle the request
StorageUnavailable = 12,
StorageUnavailable = 5000,
// ====== End of storage related status code =======
// ====== Begin of server related status code =====
/// Runtime resources exhausted, like creating threads failed.
RuntimeResourcesExhausted = 13,
RuntimeResourcesExhausted = 6000,
// ====== End of server related status code =======
}
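The renumbering groups codes by the thousands digit, so a caller can bucket a raw numeric code without matching every variant. A hypothetical sketch of that grouping, based only on the section comments above:
// Hypothetical helper: 1000s common, 2000s SQL, 3000s query, 4000s catalog,
// 5000s storage, 6000s server.
fn status_category(code: u32) -> &'static str {
    match code {
        0 => "success",
        1000..=1999 => "common",
        2000..=2999 => "sql",
        3000..=3999 => "query",
        4000..=4999 => "catalog",
        5000..=5999 => "storage",
        6000..=6999 => "server",
        _ => "unknown",
    }
}

fn main() {
    assert_eq!("catalog", status_category(4001)); // TableNotFound
    assert_eq!("storage", status_category(5000)); // StorageUnavailable
}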

View File

@@ -0,0 +1,18 @@
[package]
name = "common-function-macro"
version = "0.1.0"
edition = "2021"
[lib]
proc-macro = true
[dependencies]
quote = "1.0"
syn = "1.0"
[dev-dependencies]
arc-swap = "1.0"
common-query = { path = "../query" }
datatypes = { path = "../../datatypes" }
snafu = { version = "0.7", features = ["backtraces"] }
static_assertions = "1.1.0"

View File

@@ -0,0 +1,71 @@
use proc_macro::TokenStream;
use quote::{quote, quote_spanned};
use syn::parse::Parser;
use syn::spanned::Spanned;
use syn::{parse_macro_input, DeriveInput, ItemStruct};
/// Makes a struct implement the trait [AggrFuncTypeStore], which is necessary when writing a UDAF.
/// This derive macro is expected to be used along with the attribute macro [as_aggr_func_creator].
#[proc_macro_derive(AggrFuncTypeStore)]
pub fn aggr_func_type_store_derive(input: TokenStream) -> TokenStream {
let ast = parse_macro_input!(input as DeriveInput);
impl_aggr_func_type_store(&ast)
}
fn impl_aggr_func_type_store(ast: &DeriveInput) -> TokenStream {
let name = &ast.ident;
let gen = quote! {
use common_query::logical_plan::accumulator::AggrFuncTypeStore;
use common_query::error::{InvalidInputStateSnafu, Error as QueryError};
use datatypes::prelude::ConcreteDataType;
impl AggrFuncTypeStore for #name {
fn input_types(&self) -> std::result::Result<Vec<ConcreteDataType>, QueryError> {
let input_types = self.input_types.load();
snafu::ensure!(input_types.is_some(), InvalidInputStateSnafu);
Ok(input_types.as_ref().unwrap().as_ref().clone())
}
fn set_input_types(&self, input_types: Vec<ConcreteDataType>) -> std::result::Result<(), QueryError> {
let old = self.input_types.swap(Some(std::sync::Arc::new(input_types.clone())));
if let Some(old) = old {
snafu::ensure!(old.len() == input_types.len(), InvalidInputStateSnafu);
for (x, y) in old.iter().zip(input_types.iter()) {
snafu::ensure!(x == y, InvalidInputStateSnafu);
}
}
Ok(())
}
}
};
gen.into()
}
/// A struct can be used as a creator for an aggregate function once it has been annotated with this
/// attribute. The attribute adds a field to the struct that stores the input
/// data's types.
/// This attribute is expected to be used along with the derive macro [AggrFuncTypeStore].
#[proc_macro_attribute]
pub fn as_aggr_func_creator(_args: TokenStream, input: TokenStream) -> TokenStream {
let mut item_struct = parse_macro_input!(input as ItemStruct);
if let syn::Fields::Named(ref mut fields) = item_struct.fields {
let result = syn::Field::parse_named.parse2(quote! {
input_types: arc_swap::ArcSwapOption<Vec<ConcreteDataType>>
});
match result {
Ok(field) => fields.named.push(field),
Err(e) => return e.into_compile_error().into(),
}
} else {
return quote_spanned!(
item_struct.fields.span() => compile_error!(
"This attribute macro needs to add fields to the its annotated struct, \
so the struct must have \"{}\".")
)
.into();
}
quote! {
#item_struct
}
.into()
}
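A minimal usage sketch of the two macros together, assuming arc-swap, snafu, common-query, and datatypes are available as in the dev-dependencies above; MyCreator is a hypothetical struct and the chosen types are only for illustration:
use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use datatypes::prelude::*;

// The attribute injects the `input_types: arc_swap::ArcSwapOption<Vec<ConcreteDataType>>`
// field; the derive generates the AggrFuncTypeStore accessors over that field.
#[as_aggr_func_creator]
#[derive(Debug, Default, AggrFuncTypeStore)]
struct MyCreator {}

fn demo() -> std::result::Result<(), common_query::error::Error> {
    let creator = MyCreator::default();
    // Record the argument types once; a later call must pass the same types.
    creator.set_input_types(vec![ConcreteDataType::float64_datatype()])?;
    assert_eq!(
        vec![ConcreteDataType::float64_datatype()],
        creator.input_types()?
    );
    Ok(())
}

fn main() {
    demo().unwrap();
}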

View File

@@ -0,0 +1,14 @@
use common_function_macro::as_aggr_func_creator;
use common_function_macro::AggrFuncTypeStore;
use static_assertions::{assert_fields, assert_impl_all};
#[as_aggr_func_creator]
#[derive(Debug, Default, AggrFuncTypeStore)]
struct Foo {}
#[test]
fn test_derive() {
Foo::default();
assert_fields!(Foo: input_types);
assert_impl_all!(Foo: std::fmt::Debug, Default, AggrFuncTypeStore);
}

View File

@@ -3,32 +3,27 @@ edition = "2021"
name = "common-function"
version = "0.1.0"
[dependencies.arrow]
features = ["io_csv", "io_json", "io_parquet", "io_parquet_compression", "io_ipc", "ahash", "compute", "serde_types"]
package = "arrow2"
version = "0.10"
[dependencies]
arc-swap = "1.0"
chrono-tz = "0.6"
common-error = { path = "../error" }
common-function-macro = { path = "../function-macro" }
common-query = { path = "../query" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git" , branch = "arrow2" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2" }
datatypes = { path = "../../datatypes" }
libc = "0.2"
num = "0.4"
num-traits = "0.2"
once_cell = "1.10"
paste = "1.0"
rustpython-ast = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"}
rustpython-bytecode = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"}
rustpython-compiler = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"}
rustpython-compiler-core = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"}
rustpython-parser = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"}
rustpython-vm = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"}
snafu = { version = "0.7", features = ["backtraces"] }
statrs = "0.15"
[dependencies.arrow]
features = ["io_csv", "io_json", "io_parquet", "io_parquet_compression", "io_ipc", "ahash", "compute", "serde_types"]
package = "arrow2"
version = "0.10"
[dev-dependencies]
ron = "0.7"
serde = {version = "1.0", features = ["derive"]}
serde = { version = "1.0", features = ["derive"] }

View File

@@ -1,10 +1,8 @@
use std::cmp::Ordering;
use std::sync::Arc;
use arc_swap::ArcSwapOption;
use common_query::error::{
BadAccumulatorImplSnafu, CreateAccumulatorSnafu, InvalidInputStateSnafu, Result,
};
use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{BadAccumulatorImplSnafu, CreateAccumulatorSnafu, Result};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
use datatypes::vectors::ConstantVector;
@@ -98,10 +96,9 @@ where
}
}
#[derive(Debug, Default)]
pub struct ArgmaxAccumulatorCreator {
input_types: ArcSwapOption<Vec<ConcreteDataType>>,
}
#[as_aggr_func_creator]
#[derive(Debug, Default, AggrFuncTypeStore)]
pub struct ArgmaxAccumulatorCreator {}
impl AggregateFunctionCreator for ArgmaxAccumulatorCreator {
fn creator(&self) -> AccumulatorCreatorFunction {
@@ -124,23 +121,6 @@ impl AggregateFunctionCreator for ArgmaxAccumulatorCreator {
creator
}
fn input_types(&self) -> Result<Vec<ConcreteDataType>> {
let input_types = self.input_types.load();
ensure!(input_types.is_some(), InvalidInputStateSnafu);
Ok(input_types.as_ref().unwrap().as_ref().clone())
}
fn set_input_types(&self, input_types: Vec<ConcreteDataType>) -> Result<()> {
let old = self.input_types.swap(Some(Arc::new(input_types.clone())));
if let Some(old) = old {
ensure!(old.len() == input_types.len(), InvalidInputStateSnafu);
for (x, y) in old.iter().zip(input_types.iter()) {
ensure!(x == y, InvalidInputStateSnafu);
}
}
Ok(())
}
fn output_type(&self) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::uint64_datatype())
}

View File

@@ -1,10 +1,8 @@
use std::cmp::Ordering;
use std::sync::Arc;
use arc_swap::ArcSwapOption;
use common_query::error::{
BadAccumulatorImplSnafu, CreateAccumulatorSnafu, InvalidInputStateSnafu, Result,
};
use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{BadAccumulatorImplSnafu, CreateAccumulatorSnafu, Result};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
use datatypes::vectors::ConstantVector;
@@ -107,10 +105,9 @@ where
}
}
#[derive(Debug, Default)]
pub struct ArgminAccumulatorCreator {
input_types: ArcSwapOption<Vec<ConcreteDataType>>,
}
#[as_aggr_func_creator]
#[derive(Debug, Default, AggrFuncTypeStore)]
pub struct ArgminAccumulatorCreator {}
impl AggregateFunctionCreator for ArgminAccumulatorCreator {
fn creator(&self) -> AccumulatorCreatorFunction {
@@ -133,23 +130,6 @@ impl AggregateFunctionCreator for ArgminAccumulatorCreator {
creator
}
fn input_types(&self) -> Result<Vec<ConcreteDataType>> {
let input_types = self.input_types.load();
ensure!(input_types.is_some(), InvalidInputStateSnafu);
Ok(input_types.as_ref().unwrap().as_ref().clone())
}
fn set_input_types(&self, input_types: Vec<ConcreteDataType>) -> Result<()> {
let old = self.input_types.swap(Some(Arc::new(input_types.clone())));
if let Some(old) = old {
ensure!(old.len() == input_types.len(), InvalidInputStateSnafu);
for (x, y) in old.iter().zip(input_types.iter()) {
ensure!(x == y, InvalidInputStateSnafu);
}
}
Ok(())
}
fn output_type(&self) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::uint32_datatype())
}

View File

@@ -1,10 +1,9 @@
use std::marker::PhantomData;
use std::sync::Arc;
use arc_swap::ArcSwapOption;
use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
CreateAccumulatorSnafu, DowncastVectorSnafu, FromScalarValueSnafu, InvalidInputStateSnafu,
Result,
CreateAccumulatorSnafu, DowncastVectorSnafu, FromScalarValueSnafu, Result,
};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
@@ -118,10 +117,9 @@ where
}
}
#[derive(Debug, Default)]
pub struct DiffAccumulatorCreator {
input_types: ArcSwapOption<Vec<ConcreteDataType>>,
}
#[as_aggr_func_creator]
#[derive(Debug, Default, AggrFuncTypeStore)]
pub struct DiffAccumulatorCreator {}
impl AggregateFunctionCreator for DiffAccumulatorCreator {
fn creator(&self) -> AccumulatorCreatorFunction {
@@ -144,31 +142,13 @@ impl AggregateFunctionCreator for DiffAccumulatorCreator {
creator
}
fn input_types(&self) -> Result<Vec<ConcreteDataType>> {
let input_types = self.input_types.load();
ensure!(input_types.is_some(), InvalidInputStateSnafu);
Ok(input_types.as_ref().unwrap().as_ref().clone())
}
fn set_input_types(&self, input_types: Vec<ConcreteDataType>) -> Result<()> {
let old = self.input_types.swap(Some(Arc::new(input_types.clone())));
if let Some(old) = old {
ensure!(old.len() != input_types.len(), InvalidInputStateSnafu);
for (x, y) in old.iter().zip(input_types.iter()) {
ensure!(x == y, InvalidInputStateSnafu);
}
}
Ok(())
}
fn output_type(&self) -> Result<ConcreteDataType> {
let input_types = self.input_types()?;
ensure!(input_types.len() == 1, InvalidInputStateSnafu);
with_match_primitive_type_id!(
input_types[0].logical_type_id(),
|$S| {
Ok(ConcreteDataType::list_datatype(PrimitiveType::<<$S as Primitive>::LargestType>::default().logical_type_id().data_type()))
Ok(ConcreteDataType::list_datatype(PrimitiveType::<<$S as Primitive>::LargestType>::default().into()))
},
{
unreachable!()
@@ -182,7 +162,7 @@ impl AggregateFunctionCreator for DiffAccumulatorCreator {
with_match_primitive_type_id!(
input_types[0].logical_type_id(),
|$S| {
Ok(vec![ConcreteDataType::list_datatype(PrimitiveType::<$S>::default().logical_type_id().data_type())])
Ok(vec![ConcreteDataType::list_datatype(PrimitiveType::<$S>::default().into())])
},
{
unreachable!()

View File

@@ -1,10 +1,9 @@
use std::marker::PhantomData;
use std::sync::Arc;
use arc_swap::ArcSwapOption;
use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
BadAccumulatorImplSnafu, CreateAccumulatorSnafu, DowncastVectorSnafu, InvalidInputStateSnafu,
Result,
BadAccumulatorImplSnafu, CreateAccumulatorSnafu, DowncastVectorSnafu, Result,
};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
@@ -125,10 +124,9 @@ where
}
}
#[derive(Debug, Default)]
pub struct MeanAccumulatorCreator {
input_types: ArcSwapOption<Vec<ConcreteDataType>>,
}
#[as_aggr_func_creator]
#[derive(Debug, Default, AggrFuncTypeStore)]
pub struct MeanAccumulatorCreator {}
impl AggregateFunctionCreator for MeanAccumulatorCreator {
fn creator(&self) -> AccumulatorCreatorFunction {
@@ -151,23 +149,6 @@ impl AggregateFunctionCreator for MeanAccumulatorCreator {
creator
}
fn input_types(&self) -> Result<Vec<ConcreteDataType>> {
let input_types = self.input_types.load();
ensure!(input_types.is_some(), InvalidInputStateSnafu);
Ok(input_types.as_ref().unwrap().as_ref().clone())
}
fn set_input_types(&self, input_types: Vec<ConcreteDataType>) -> Result<()> {
let old = self.input_types.swap(Some(Arc::new(input_types.clone())));
if let Some(old) = old {
ensure!(old.len() == input_types.len(), InvalidInputStateSnafu);
for (x, y) in old.iter().zip(input_types.iter()) {
ensure!(x == y, InvalidInputStateSnafu);
}
}
Ok(())
}
fn output_type(&self) -> Result<ConcreteDataType> {
let input_types = self.input_types()?;
ensure!(input_types.len() == 1, InvalidInputStateSnafu);

View File

@@ -2,17 +2,17 @@ use std::cmp::Reverse;
use std::collections::BinaryHeap;
use std::sync::Arc;
use arc_swap::ArcSwapOption;
use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
CreateAccumulatorSnafu, DowncastVectorSnafu, FromScalarValueSnafu, InvalidInputStateSnafu,
Result,
CreateAccumulatorSnafu, DowncastVectorSnafu, FromScalarValueSnafu, Result,
};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
use datatypes::prelude::*;
use datatypes::types::OrdPrimitive;
use datatypes::value::ListValue;
use datatypes::vectors::{ConstantVector, ListVector};
use datatypes::with_match_ordered_primitive_type_id;
use datatypes::with_match_primitive_type_id;
use num::NumCast;
use snafu::{ensure, OptionExt, ResultExt};
@@ -37,17 +37,19 @@ use snafu::{ensure, OptionExt, ResultExt};
#[derive(Debug, Default)]
pub struct Median<T>
where
T: Primitive + Ord,
T: Primitive,
{
greater: BinaryHeap<Reverse<T>>,
not_greater: BinaryHeap<T>,
greater: BinaryHeap<Reverse<OrdPrimitive<T>>>,
not_greater: BinaryHeap<OrdPrimitive<T>>,
}
impl<T> Median<T>
where
T: Primitive + Ord,
T: Primitive,
{
fn push(&mut self, value: T) {
let value = OrdPrimitive::<T>(value);
if self.not_greater.is_empty() {
self.not_greater.push(value);
return;
@@ -71,7 +73,7 @@ where
// to use them.
impl<T> Accumulator for Median<T>
where
T: Primitive + Ord,
T: Primitive,
for<'a> T: Scalar<RefType<'a> = T>,
{
// This function serializes our state to `ScalarValue`, which DataFusion uses to pass this
@@ -166,8 +168,8 @@ where
let greater = self.greater.peek().unwrap();
// the following three NumCast's `unwrap`s are safe because T is primitive
let not_greater_v: f64 = NumCast::from(not_greater).unwrap();
let greater_v: f64 = NumCast::from(greater.0).unwrap();
let not_greater_v: f64 = NumCast::from(not_greater.as_primitive()).unwrap();
let greater_v: f64 = NumCast::from(greater.0.as_primitive()).unwrap();
let median: T = NumCast::from((not_greater_v + greater_v) / 2.0).unwrap();
median.into()
};
@@ -175,16 +177,15 @@ where
}
}
#[derive(Debug, Default)]
pub struct MedianAccumulatorCreator {
input_types: ArcSwapOption<Vec<ConcreteDataType>>,
}
#[as_aggr_func_creator]
#[derive(Debug, Default, AggrFuncTypeStore)]
pub struct MedianAccumulatorCreator {}
impl AggregateFunctionCreator for MedianAccumulatorCreator {
fn creator(&self) -> AccumulatorCreatorFunction {
let creator: AccumulatorCreatorFunction = Arc::new(move |types: &[ConcreteDataType]| {
let input_type = &types[0];
with_match_ordered_primitive_type_id!(
with_match_primitive_type_id!(
input_type.logical_type_id(),
|$S| {
Ok(Box::new(Median::<$S>::default()))
@@ -201,23 +202,6 @@ impl AggregateFunctionCreator for MedianAccumulatorCreator {
creator
}
fn input_types(&self) -> Result<Vec<ConcreteDataType>> {
let input_types = self.input_types.load();
ensure!(input_types.is_some(), InvalidInputStateSnafu);
Ok(input_types.as_ref().unwrap().as_ref().clone())
}
fn set_input_types(&self, input_types: Vec<ConcreteDataType>) -> Result<()> {
let old = self.input_types.swap(Some(Arc::new(input_types.clone())));
if let Some(old) = old {
ensure!(old.len() == input_types.len(), InvalidInputStateSnafu);
for (x, y) in old.iter().zip(input_types.iter()) {
ensure!(x == y, InvalidInputStateSnafu);
}
}
Ok(())
}
fn output_type(&self) -> Result<ConcreteDataType> {
let input_types = self.input_types()?;
ensure!(input_types.len() == 1, InvalidInputStateSnafu);
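For readers unfamiliar with the two-heap technique that Median relies on, here is a standalone sketch over plain i64 values instead of OrdPrimitive<T>; it mirrors the idea (lower half in a max-heap, upper half in a min-heap via Reverse) rather than the exact code above:
use std::cmp::Reverse;
use std::collections::BinaryHeap;

#[derive(Default)]
struct SimpleMedian {
    // Max-heap holding the lower half of the values seen so far.
    not_greater: BinaryHeap<i64>,
    // Min-heap (via Reverse) holding the upper half.
    greater: BinaryHeap<Reverse<i64>>,
}

impl SimpleMedian {
    fn push(&mut self, value: i64) {
        if self.not_greater.peek().map_or(true, |&max| value <= max) {
            self.not_greater.push(value);
        } else {
            self.greater.push(Reverse(value));
        }
        // Rebalance so the halves never differ in size by more than one.
        if self.not_greater.len() > self.greater.len() + 1 {
            self.greater.push(Reverse(self.not_greater.pop().unwrap()));
        } else if self.greater.len() > self.not_greater.len() {
            self.not_greater.push(self.greater.pop().unwrap().0);
        }
    }

    fn median(&self) -> Option<f64> {
        match (self.not_greater.peek(), self.greater.peek()) {
            (None, _) => None,
            (Some(&lo), _) if self.not_greater.len() > self.greater.len() => Some(lo as f64),
            (Some(&lo), Some(&Reverse(hi))) => Some((lo as f64 + hi as f64) / 2.0),
            _ => None,
        }
    }
}

fn main() {
    let mut m = SimpleMedian::default();
    for v in [5, 1, 3, 2, 4] {
        m.push(v);
    }
    assert_eq!(Some(3.0), m.median());
}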

View File

@@ -64,55 +64,24 @@ pub(crate) struct AggregateFunctions;
impl AggregateFunctions {
pub fn register(registry: &FunctionRegistry) {
registry.register_aggregate_function(Arc::new(AggregateFunctionMeta::new(
"median",
1,
Arc::new(|| Arc::new(MedianAccumulatorCreator::default())),
)));
registry.register_aggregate_function(Arc::new(AggregateFunctionMeta::new(
"diff",
1,
Arc::new(|| Arc::new(DiffAccumulatorCreator::default())),
)));
registry.register_aggregate_function(Arc::new(AggregateFunctionMeta::new(
"mean",
1,
Arc::new(|| Arc::new(MeanAccumulatorCreator::default())),
)));
registry.register_aggregate_function(Arc::new(AggregateFunctionMeta::new(
"polyval",
2,
Arc::new(|| Arc::new(PolyvalAccumulatorCreator::default())),
)));
registry.register_aggregate_function(Arc::new(AggregateFunctionMeta::new(
"argmax",
1,
Arc::new(|| Arc::new(ArgmaxAccumulatorCreator::default())),
)));
registry.register_aggregate_function(Arc::new(AggregateFunctionMeta::new(
"argmin",
1,
Arc::new(|| Arc::new(ArgminAccumulatorCreator::default())),
)));
registry.register_aggregate_function(Arc::new(AggregateFunctionMeta::new(
"diff",
1,
Arc::new(|| Arc::new(DiffAccumulatorCreator::default())),
)));
registry.register_aggregate_function(Arc::new(AggregateFunctionMeta::new(
"percentile",
2,
Arc::new(|| Arc::new(PercentileAccumulatorCreator::default())),
)));
registry.register_aggregate_function(Arc::new(AggregateFunctionMeta::new(
"scipystatsnormcdf",
2,
Arc::new(|| Arc::new(ScipyStatsNormCdfAccumulatorCreator::default())),
)));
registry.register_aggregate_function(Arc::new(AggregateFunctionMeta::new(
"scipystatsnormpdf",
2,
Arc::new(|| Arc::new(ScipyStatsNormPdfAccumulatorCreator::default())),
)));
macro_rules! register_aggr_func {
($name :expr, $arg_count :expr, $creator :ty) => {
registry.register_aggregate_function(Arc::new(AggregateFunctionMeta::new(
$name,
$arg_count,
Arc::new(|| Arc::new(<$creator>::default())),
)));
};
}
register_aggr_func!("median", 1, MedianAccumulatorCreator);
register_aggr_func!("diff", 1, DiffAccumulatorCreator);
register_aggr_func!("mean", 1, MeanAccumulatorCreator);
register_aggr_func!("polyval", 2, PolyvalAccumulatorCreator);
register_aggr_func!("argmax", 1, ArgmaxAccumulatorCreator);
register_aggr_func!("argmin", 1, ArgminAccumulatorCreator);
register_aggr_func!("percentile", 2, PercentileAccumulatorCreator);
register_aggr_func!("scipystatsnormcdf", 2, ScipyStatsNormCdfAccumulatorCreator);
register_aggr_func!("scipystatsnormpdf", 2, ScipyStatsNormPdfAccumulatorCreator);
}
}

View File

@@ -2,17 +2,18 @@ use std::cmp::Reverse;
use std::collections::BinaryHeap;
use std::sync::Arc;
use arc_swap::ArcSwapOption;
use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
self, BadAccumulatorImplSnafu, CreateAccumulatorSnafu, DowncastVectorSnafu,
FromScalarValueSnafu, InvalidInputColSnafu, InvalidInputStateSnafu, Result,
FromScalarValueSnafu, InvalidInputColSnafu, Result,
};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
use datatypes::prelude::*;
use datatypes::types::OrdPrimitive;
use datatypes::value::{ListValue, OrderedFloat};
use datatypes::vectors::{ConstantVector, Float64Vector, ListVector};
use datatypes::with_match_ordered_primitive_type_id;
use datatypes::with_match_primitive_type_id;
use num::NumCast;
use snafu::{ensure, OptionExt, ResultExt};
@@ -37,19 +38,21 @@ use snafu::{ensure, OptionExt, ResultExt};
#[derive(Debug, Default)]
pub struct Percentile<T>
where
T: Primitive + Ord,
T: Primitive,
{
greater: BinaryHeap<Reverse<T>>,
not_greater: BinaryHeap<T>,
greater: BinaryHeap<Reverse<OrdPrimitive<T>>>,
not_greater: BinaryHeap<OrdPrimitive<T>>,
n: u64,
p: Option<f64>,
}
impl<T> Percentile<T>
where
T: Primitive + Ord,
T: Primitive,
{
fn push(&mut self, value: T) {
let value = OrdPrimitive::<T>(value);
self.n += 1;
if self.not_greater.is_empty() {
self.not_greater.push(value);
@@ -76,7 +79,7 @@ where
impl<T> Accumulator for Percentile<T>
where
T: Primitive + Ord,
T: Primitive,
for<'a> T: Scalar<RefType<'a> = T>,
{
fn state(&self) -> Result<Vec<Value>> {
@@ -212,7 +215,7 @@ where
if not_greater.is_none() {
return Ok(Value::Null);
}
let not_greater = *self.not_greater.peek().unwrap();
let not_greater = (*self.not_greater.peek().unwrap()).as_primitive();
let percentile = if self.greater.is_empty() {
NumCast::from(not_greater).unwrap()
} else {
@@ -224,23 +227,22 @@ where
};
let fract = (((self.n - 1) as f64) * p / 100_f64).fract();
let not_greater_v: f64 = NumCast::from(not_greater).unwrap();
let greater_v: f64 = NumCast::from(greater.0).unwrap();
let greater_v: f64 = NumCast::from(greater.0.as_primitive()).unwrap();
not_greater_v * (1.0 - fract) + greater_v * fract
};
Ok(Value::from(percentile))
}
}
#[derive(Debug, Default)]
pub struct PercentileAccumulatorCreator {
input_types: ArcSwapOption<Vec<ConcreteDataType>>,
}
#[as_aggr_func_creator]
#[derive(Debug, Default, AggrFuncTypeStore)]
pub struct PercentileAccumulatorCreator {}
impl AggregateFunctionCreator for PercentileAccumulatorCreator {
fn creator(&self) -> AccumulatorCreatorFunction {
let creator: AccumulatorCreatorFunction = Arc::new(move |types: &[ConcreteDataType]| {
let input_type = &types[0];
with_match_ordered_primitive_type_id!(
with_match_primitive_type_id!(
input_type.logical_type_id(),
|$S| {
Ok(Box::new(Percentile::<$S>::default()))
@@ -257,23 +259,6 @@ impl AggregateFunctionCreator for PercentileAccumulatorCreator {
creator
}
fn input_types(&self) -> Result<Vec<ConcreteDataType>> {
let input_types = self.input_types.load();
ensure!(input_types.is_some(), InvalidInputStateSnafu);
Ok(input_types.as_ref().unwrap().as_ref().clone())
}
fn set_input_types(&self, input_types: Vec<ConcreteDataType>) -> Result<()> {
let old = self.input_types.swap(Some(Arc::new(input_types.clone())));
if let Some(old) = old {
ensure!(old.len() == input_types.len(), InvalidInputStateSnafu);
for (x, y) in old.iter().zip(input_types.iter()) {
ensure!(x == y, InvalidInputStateSnafu);
}
}
Ok(())
}
fn output_type(&self) -> Result<ConcreteDataType> {
let input_types = self.input_types()?;
ensure!(input_types.len() == 2, InvalidInputStateSnafu);

View File

@@ -1,10 +1,10 @@
use std::marker::PhantomData;
use std::sync::Arc;
use arc_swap::ArcSwapOption;
use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
self, BadAccumulatorImplSnafu, CreateAccumulatorSnafu, DowncastVectorSnafu,
FromScalarValueSnafu, InvalidInputColSnafu, InvalidInputStateSnafu, Result,
FromScalarValueSnafu, InvalidInputColSnafu, Result,
};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
@@ -187,10 +187,9 @@ where
}
}
#[derive(Debug, Default)]
pub struct PolyvalAccumulatorCreator {
input_types: ArcSwapOption<Vec<ConcreteDataType>>,
}
#[as_aggr_func_creator]
#[derive(Debug, Default, AggrFuncTypeStore)]
pub struct PolyvalAccumulatorCreator {}
impl AggregateFunctionCreator for PolyvalAccumulatorCreator {
fn creator(&self) -> AccumulatorCreatorFunction {
@@ -213,23 +212,6 @@ impl AggregateFunctionCreator for PolyvalAccumulatorCreator {
creator
}
fn input_types(&self) -> Result<Vec<ConcreteDataType>> {
let input_types = self.input_types.load();
ensure!(input_types.is_some(), InvalidInputStateSnafu);
Ok(input_types.as_ref().unwrap().as_ref().clone())
}
fn set_input_types(&self, input_types: Vec<ConcreteDataType>) -> Result<()> {
let old = self.input_types.swap(Some(Arc::new(input_types.clone())));
if let Some(old) = old {
ensure!(old.len() == input_types.len(), InvalidInputStateSnafu);
for (x, y) in old.iter().zip(input_types.iter()) {
ensure!(x == y, InvalidInputStateSnafu);
}
}
Ok(())
}
fn output_type(&self) -> Result<ConcreteDataType> {
let input_types = self.input_types()?;
ensure!(input_types.len() == 2, InvalidInputStateSnafu);
@@ -237,7 +219,7 @@ impl AggregateFunctionCreator for PolyvalAccumulatorCreator {
with_match_primitive_type_id!(
input_type,
|$S| {
Ok(PrimitiveType::<<$S as Primitive>::LargestType>::default().logical_type_id().data_type())
Ok(PrimitiveType::<<$S as Primitive>::LargestType>::default().into())
},
{
unreachable!()

View File

@@ -1,10 +1,9 @@
use std::sync::Arc;
use arc_swap::ArcSwapOption;
use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
self, BadAccumulatorImplSnafu, CreateAccumulatorSnafu, DowncastVectorSnafu,
FromScalarValueSnafu, GenerateFunctionSnafu, InvalidInputColSnafu, InvalidInputStateSnafu,
Result,
FromScalarValueSnafu, GenerateFunctionSnafu, InvalidInputColSnafu, Result,
};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
@@ -174,10 +173,9 @@ where
}
}
#[derive(Debug, Default)]
pub struct ScipyStatsNormCdfAccumulatorCreator {
input_types: ArcSwapOption<Vec<ConcreteDataType>>,
}
#[as_aggr_func_creator]
#[derive(Debug, Default, AggrFuncTypeStore)]
pub struct ScipyStatsNormCdfAccumulatorCreator {}
impl AggregateFunctionCreator for ScipyStatsNormCdfAccumulatorCreator {
fn creator(&self) -> AccumulatorCreatorFunction {
@@ -200,23 +198,6 @@ impl AggregateFunctionCreator for ScipyStatsNormCdfAccumulatorCreator {
creator
}
fn input_types(&self) -> Result<Vec<ConcreteDataType>> {
let input_types = self.input_types.load();
ensure!(input_types.is_some(), InvalidInputStateSnafu);
Ok(input_types.as_ref().unwrap().as_ref().clone())
}
fn set_input_types(&self, input_types: Vec<ConcreteDataType>) -> Result<()> {
let old = self.input_types.swap(Some(Arc::new(input_types.clone())));
if let Some(old) = old {
ensure!(old.len() == input_types.len(), InvalidInputStateSnafu);
for (x, y) in old.iter().zip(input_types.iter()) {
ensure!(x == y, InvalidInputStateSnafu);
}
}
Ok(())
}
fn output_type(&self) -> Result<ConcreteDataType> {
let input_types = self.input_types()?;
ensure!(input_types.len() == 2, InvalidInputStateSnafu);

View File

@@ -1,10 +1,9 @@
use std::sync::Arc;
use arc_swap::ArcSwapOption;
use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
self, BadAccumulatorImplSnafu, CreateAccumulatorSnafu, DowncastVectorSnafu,
FromScalarValueSnafu, GenerateFunctionSnafu, InvalidInputColSnafu, InvalidInputStateSnafu,
Result,
FromScalarValueSnafu, GenerateFunctionSnafu, InvalidInputColSnafu, Result,
};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
@@ -174,10 +173,9 @@ where
}
}
#[derive(Debug, Default)]
pub struct ScipyStatsNormPdfAccumulatorCreator {
input_types: ArcSwapOption<Vec<ConcreteDataType>>,
}
#[as_aggr_func_creator]
#[derive(Debug, Default, AggrFuncTypeStore)]
pub struct ScipyStatsNormPdfAccumulatorCreator {}
impl AggregateFunctionCreator for ScipyStatsNormPdfAccumulatorCreator {
fn creator(&self) -> AccumulatorCreatorFunction {
@@ -200,23 +198,6 @@ impl AggregateFunctionCreator for ScipyStatsNormPdfAccumulatorCreator {
creator
}
fn input_types(&self) -> Result<Vec<ConcreteDataType>> {
let input_types = self.input_types.load();
ensure!(input_types.is_some(), InvalidInputStateSnafu);
Ok(input_types.as_ref().unwrap().as_ref().clone())
}
fn set_input_types(&self, input_types: Vec<ConcreteDataType>) -> Result<()> {
let old = self.input_types.swap(Some(Arc::new(input_types.clone())));
if let Some(old) = old {
ensure!(old.len() == input_types.len(), InvalidInputStateSnafu);
for (x, y) in old.iter().zip(input_types.iter()) {
ensure!(x == y, InvalidInputStateSnafu);
}
}
Ok(())
}
fn output_type(&self) -> Result<ConcreteDataType> {
let input_types = self.input_types()?;
ensure!(input_types.len() == 2, InvalidInputStateSnafu);

View File

@@ -6,8 +6,14 @@ edition = "2021"
[dependencies]
api = { path = "../../api" }
async-trait = "0.1"
common-base = { path = "../base" }
common-error = { path = "../error" }
common-runtime = { path = "../runtime" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = ["simd"] }
snafu = { version = "0.7", features = ["backtraces"] }
tokio = { version = "1.0", features = ["full"] }
tonic = "0.8"
tower = "0.4"
[dependencies.arrow]
package = "arrow2"

Some files were not shown because too many files have changed in this diff.