Compare commits

171 Commits

Author SHA1 Message Date
Ning Sun
deaa1f9578 ci: move components to flakes so it won't affect builders (#5464)
* ci: move components to flakes so it won't affect builders

* chore: add gnuplot for benchmark/criterion
2025-01-31 08:55:59 +00:00
Ruihang Xia
f378d218e9 perf: optimize writing non-null primitive value (#5460)
* avoid using arrow builder

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* optimize from_vec

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-01-30 14:53:59 +00:00
yihong
5b6279f191 fix: no need for special case since datafusion updated (#5458)
* fix: no need for special case since datafusion updated

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: drop useless tests

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

---------

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-26 05:57:23 +00:00
Ning Sun
698b28c636 feat: provide options to disable or customize http cross-origin settings (#5450)
* feat: add cors headers for http server

* test: add cors test

* test: add preflight test

* feat: allow customize http cross-origin settings

* chore: typo fix

* test: update tests

* test: fix test for config

* refactor: address review comments
2025-01-26 03:55:34 +00:00
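
A minimal, hypothetical sketch of the allow-list check that customizable cross-origin settings imply, using only the standard library; the function and the "empty list means disabled" convention are illustrative assumptions, not GreptimeDB's actual HTTP server API.

```rust
/// Decide which `Access-Control-Allow-Origin` value to return for a request,
/// given a configurable allow-list. An empty list is treated here as
/// "cross-origin support disabled" (no header emitted). Illustrative only.
fn cors_allow_origin(request_origin: &str, allowed: &[String]) -> Option<String> {
    if allowed.is_empty() {
        return None; // CORS disabled
    }
    if allowed.iter().any(|o| o == "*") {
        return Some("*".to_string());
    }
    allowed
        .iter()
        .find(|o| o.as_str() == request_origin)
        .cloned()
}

fn main() {
    let allowed = vec!["https://example.com".to_string()];
    assert_eq!(
        cors_allow_origin("https://example.com", &allowed).as_deref(),
        Some("https://example.com")
    );
    assert_eq!(cors_allow_origin("https://evil.test", &allowed), None);
}
```
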
yihong
c4d10313e6 fix: better error handler for the time range close #5449 (#5453)
* fix: better error handler for the time range close #5449

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: wrong compare

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: address comments

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

---------

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-26 03:33:12 +00:00
Ruihang Xia
f165bfb0af fix: remove metric engine's internal column from promql's query (#5032)
* fix: remove metric engine's internal column from promql's query

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove unwrap

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* filter out physical table

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add integration test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-01-26 03:10:59 +00:00
Weny Xu
4111c18d44 chore: avoid unnecessary cloning (#5454)
* chore: avoid unnecessary cloning

* Apply suggestions from code review

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2025-01-25 14:16:22 +00:00
Ruihang Xia
5abe4c141a feat: expose http endpoint for flownode and metasrv (#5437)
* feat: expose http endpoint for flownode and metasrv

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* adjust health check

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-01-25 13:20:25 +00:00
yihong
adb5c3743c fix: flush table panic when table has interval column close #3235 (#5422)
* fix: flash table panic when table has interval column close #3235

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* Revert "fix: flash table panic when table has interval column close #3235"

This reverts commit ffc63efda39cd6ef525313b60ede061c5ec24b12.

* fix: create table do not support interval type for now close #3235

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: sqlness

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: address comments

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: address comments fix conflict and more tests

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: address final comments drop useless sqlness tests

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

---------

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-25 08:09:55 +00:00
Ruihang Xia
7c5ead90ac feat: mirror insert request to flownode in async (#5444)
* feat: mirror insert request to flownode in async

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-01-24 13:42:27 +00:00
shuiyisong
d870987a65 chore: update toolchain to 2024-12-25 (#5430)
* chore: update toolchain to 2024-12-25

* chore: fix clippy

* feat: update flakes

* chore: remove `rerun-if-changed` for now

* chore: update shadow-rs

* fix: clippy

* chore: update version in DEV_BUILDER_IMAGE_TAG

---------

Co-authored-by: Ning Sun <sunning@greptime.com>
2025-01-24 09:30:54 +00:00
Ning Sun
dce4ed9f1d feat: add CORS headers for http interfaces (#5447)
* feat: add cors headers for http server

* test: add cors test

* test: add preflight test
2025-01-24 09:28:04 +00:00
yihong
bbfbc9f0f8 fix: drop unused numpy code since pyo3 rustpython do not support any more (#5442)
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-24 08:20:01 +00:00
Weny Xu
b107384cc6 feat(metric-engine): support to write rows with sparse primary key encoding (#5424)
* feat: support to write rows with sparse primary key encoding

* feat: cache decoded primary key

* chore: remove unused code

* feat: create physical table based on the engine config

* chore: log primary key encoding info

* fix: correct sqlness test

* chore: correct config.md

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2025-01-24 06:56:09 +00:00
zyy17
2802c8bf28 ci: update dev-builder version to fix build android image failed (#5445)
ci: update dev-builder version
2025-01-24 06:48:26 +00:00
zyy17
9b9784a557 fix: install x86-64 protoc on android dev-builder (#5443) 2025-01-24 04:39:39 +00:00
yihong
1e61d05211 fix: arm actions test failed (#5433)
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-24 03:43:59 +00:00
Ning Sun
d53b9fbd03 ci: switch to nix flakes for more reproducible builds (#5426) 2025-01-24 03:30:45 +00:00
zyy17
d01bc916f1 ci: unify all protoc version to 29.3 (#5434)
Co-authored-by: Ning Sun <sunng@protonmail.com>
Co-authored-by: shuiyisong <113876041+shuiyisong@users.noreply.github.com>
2025-01-24 03:29:11 +00:00
Stephan3555
8ea463f516 feat: Address different Metrics for Prometheus queries in the Dashboard and fix typo in metric name (#5441)
Fix typo in metric and add metric for prometheus compatible endpoint

Co-authored-by: shuiyisong <113876041+shuiyisong@users.noreply.github.com>
2025-01-24 03:28:05 +00:00
Zhenchi
088317fd3a fix: unexpected warning on applying bloom (#5431)
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2025-01-24 03:09:17 +00:00
Ning Sun
69881e3bc1 ci: allow skipping tests as required tasks (#5436)
ci: allow skipping tests
2025-01-24 03:04:55 +00:00
Ruihang Xia
9af4160068 fix(log-query): panic on prometheus (#5429)
* fix(log-query): panic on prometheus

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix test environment setup

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-01-23 11:48:37 +00:00
zyy17
45e68603a1 ci: update dev-builder version (#5435) 2025-01-23 11:43:10 +00:00
zyy17
1eb4b8ed4f refactor: support to flatten json object in greptime_identity pipeline (#5358)
* refactor: support to flatten json object in greptime_identity pipeline

* refactor: add GreptimeIdentityPipelineParams to configure greptime_identity pipeline

* refactor: pass greptime identity pipeline params by one header kv

* refactor: code review

* refactor: make pipeline params more general for all internal pipelines

* chore: remove axum deps from pipeline

* fix: clippy errors

* chore: fix and add test

* test: adopt api change for test client

---------

Co-authored-by: shuiyisong <xixing.sys@gmail.com>
Co-authored-by: Ning Sun <sunng@protonmail.com>
Co-authored-by: Ning Sun <sunning@greptime.com>
2025-01-23 08:50:50 +00:00
Weny Xu
05f21679d6 feat: replace DensePrimaryKeyCodec with Arc<dyn PrimaryKeyCodec> (#5408)
* feat: use `PrimaryKeyCodec` trait object

* feat: introduce `RewritePrimaryKey`

* chore: apply suggestions from CR

* fix: fix clippy

* chore: add comments
2025-01-23 08:44:17 +00:00
Yingwen
35b635f639 feat!: Bump datafusion, prost, hyper, tonic, tower, axum (#5417)
* change dep

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: adapt to arrow's interval array

* chore: fix compile errors in datatypes crate

* chore: fix api crate compiler errors

* chore: fix compiler errors in common-grpc

* chore: fix common-datasource errors

* chore: fix deprecated code in common-datasource

* fix promql and physical plan related

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* wip: upgrading network deps

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* block on updating `sqlparser`

* upgrade sqlparser

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* adapt new df's trait requirements

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: fix compiler errors in mito2

* chore: fix common-function crate errors

* chore: fix catalog errors

* change import path

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: fix some errors in query crate

* chore: fix some errors in query crate

* aggr expr and some other tiny fixes

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: fix expr related errors in query crate

* chore: fix query serializer and admin command

* chore: fix grpc services

* feat: axum serve

* chore: fix http server

* remove handle_error handler
* refactor timeout layer
* serve axum

* chore: fix flow aggr functions

* chore: fix flow

* feat: fix errors in meta-srv

* boxed()
* use TokioIo

* feat!: Remove script crate and python feature (#5321)

* feat: exclude script crate

* chore: simplify feature

* feat: remove the script crate

* chore: remove python feature and some comments

* chore: fix warning

* chore: fix servers tests compiler errors

* feat: fix tests-integration errors

* chore: fix unused

* test: fix catalog test

* chore: fix compiler errors for crates using common-meta

testing feature is enabled when checking with --workspace

* test: use display for logical plan test

* test: implement rewrite for ScanHintRule

* fix: http server build panic

* test: fix mito test

* fix: sql parser type alias error

* test: fix TestClient not listen

* test: some flow tests

* test(flow): more fix

* fix: test_otlp_logs

* test: fix promql test that using deprecated method fun()

* fix: sql type replace supports Int8 ~ Int64, UInt8 ~ UInt64

* test: fix infer schema test case

* test: fix tests related to plan display

* chore: fix last flow test

* test: fix function format related assertion

* test: use larger port range for tests

* fix: test_otlp_traces

* fix: test_otlp_metrics

* fix range query and dist plan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: flow handle distinct use deprecated field

* fix: can't pass Join plan expressions to LogicalPlan::with_new_exprs

* test: fix deserialize test

* test: reduce split key case num

* tests: lower case aggr func name

* test: fix some sqlness tests

* tests: more sqlness fix

* tests: fixed sqlness test

* commit non-bug changes

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: make our udf correct

* fix: implement empty methods of ContextProvider for DfContextProviderAdapter

* test: update sqlness test result

* chore: remove unused

* fix: provide alias name for AggregateExprBuilder in range plan

* test: update range query result

* fix: implement missing ContextProvider methods for DfContextProviderAdapter

* test: update timestamps, cte result

* fix: supports empty projection in mito

* test: update comment for cte test

* fix: support projection for numbers

* test: update test cases after projection fix

* fix: fix range select first_value/last_value

* fix: handle CAST and time index conflict

* fix: handle order by correctly in range first_value/last_value

* test: update sqlness result

* test: update view test result

* test: update decimal test

wait for https://github.com/apache/datafusion/pull/14126 to fix this

* feat: remove redundant physical optimization

todo(ruihang): Check if we can remove this.

* test: update sqlness test result

* chore: range select default sort use nulls_first = false

* test: update filter push down test result

* test: comment out decimal test to avoid different panic message

* test: update some distributed test result

* test: update test for distributed count and filter push down

* test: update subqueries test

* fix: SessionState may overwrite our UDFs

* chore: fix compiler errors after merging main

* fix: fix elasticsearch and dashboard router panic

* chore: fix common-functions tests

* chore: update sqlness result

* test: fix id keyword and update sqlness result

* test: fix flow_null test

* fix: enlarge thread size in debug mode to avoid overflow

* chore: fix warnings in common-function

* chore: fix warning in flow

* chore: fix warnings in query crate

* chore: remove unused warnings

* chore: fix deprecated warnings for parquet

* chore: fix deprecated warning in servers crate

* style: fix clippy

* test: enlarge ttl time in mito cache ttl test

* chore: fix typo

* style: fmt toml

* refactor: reimplement PartialOrd for RangeSelect

* chore: remove script crate files introduced by merge

* fix: return error if sql option is not kv

* chore: do not use ..default::default()

* chore: per review

* chore: update error message in BuildAdminFunctionArgsSnafu

Co-authored-by: jeremyhi <jiachun_feng@proton.me>

* refactor: typed precision

* update sqlness view case

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: flow per review

* chore: add example in comment

* chore: warn if parquet stats of timestamp is not INT64

* style: add a newline before derive to make the comment more clear

* test: update sqlness result

* fix: flow from substrait

* chore: change update_range_context log to debug level

* chore: move axum-extra axum-macros to workspace

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: luofucong <luofc@foxmail.com>
Co-authored-by: discord9 <discord9@163.com>
Co-authored-by: shuiyisong <xixing.sys@gmail.com>
Co-authored-by: jeremyhi <jiachun_feng@proton.me>
2025-01-23 06:15:40 +00:00
Yohan Wal
3ed085459c feat(remote-wal): introduce TopicRegionManager (#5407)
* feat: add manager to map region to topic

* chore: add a delete

* chore: rename keys

* chore: update config file

* fix: fix unit test

* chore: change prefix

* chore: clean up

* chore: follow review comments

* chore: follow review comments

* chore: follow review comments

* chore: follow review comments

* chore: follow review comments
2025-01-22 06:06:27 +00:00
Lei, HUANG
51a8d0a726 fix: avoid suppress manual compaction (#5399)
* fix/avoid-suppress-manual-compaction:
 **Refactor Compaction Logic**

 - Removed `PendingCompaction` struct and integrated its functionality directly into `CompactionStatus` in `compaction.rs`.
 - Simplified waiter management by consolidating waiter handling logic into `CompactionStatus`.
 - Updated `CompactionRequest` creation to directly handle waiters without intermediate structures.
 - Adjusted test cases in `compaction.rs` to align with the new waiter management approach.

(cherry picked from commit 87e2d1c2cc9bd82c02991d22e429bef25c5ee348)

* fix/avoid-suppress-manual-compaction:
 ### Add Support for Manual Compaction Requests

 - **Compaction Logic Enhancements**:
   - Updated `CompactionScheduler` in `compaction.rs` to handle manual compaction requests using `Options::StrictWindow`.
   - Introduced `PendingCompaction` struct to manage pending manual compaction requests.
   - Added logic to reschedule manual compaction requests once the current compaction task is completed.

 - **Testing**:
   - Added `test_manual_compaction_when_compaction_in_progress` to verify the handling of manual compaction requests during ongoing compaction processes.

 These changes enhance the compaction scheduling mechanism by allowing manual compaction requests to be queued and processed efficiently.

(cherry picked from commit bc38ed0f2f8ba2c4690e0d0e251aeb2acce308ca)

* chore: fix conflicts

* fix/avoid-suppress-manual-compaction:
 ### Add Error Handling for Manual Compaction Override

 - **`compaction.rs`**: Enhanced the `set_pending_request` method to handle manual compaction overrides by sending an error to the waiter if a previous request exists.
 - **`error.rs`**: Introduced a new error variant `ManualCompactionOverride` to represent manual compaction being overridden, and mapped it to the `Cancelled` status code.

* fix: format

* fix/avoid-suppress-manual-compaction:
 **Add Error Handling for Pending Compaction Requests**

 - Enhanced error handling in `compaction.rs` by adding logic to handle errors for pending compaction requests.
 - Introduced a mechanism to send errors using `waiter.send` when a pending compaction request fails, ensuring proper error propagation and context with `CompactRegionSnafu`.

* fix/avoid-suppress-manual-compaction:
 **Fix Typo and Simplify Code Logic in `compaction.rs`**

 - Corrected a typo in the license comment from "langucage" to "language".
 - Simplified the logic for handling `pending_compaction` in `CompactionStatus` by removing unnecessary pattern matching and directly accessing `waiter`.

* fix: typo
2025-01-22 05:36:39 +00:00
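
A hypothetical sketch of the "pending manual compaction" behaviour described in the commit above: only one pending request is kept, and setting a new one notifies the previous waiter that it was overridden instead of silently dropping it. The struct and channel types below are illustrative stand-ins, not the real `compaction.rs` code.

```rust
use std::sync::mpsc::{channel, Sender};

/// Single pending slot for a manual compaction request (illustrative).
#[derive(Default)]
struct CompactionStatus {
    pending_waiter: Option<Sender<Result<(), String>>>,
}

impl CompactionStatus {
    fn set_pending_request(&mut self, waiter: Sender<Result<(), String>>) {
        if let Some(prev) = self.pending_waiter.replace(waiter) {
            // The previous manual request is overridden, not suppressed silently.
            let _ = prev.send(Err("manual compaction overridden".to_string()));
        }
    }
}

fn main() {
    let mut status = CompactionStatus::default();
    let (tx1, rx1) = channel();
    let (tx2, _rx2) = channel();
    status.set_pending_request(tx1);
    status.set_pending_request(tx2);
    // The first waiter receives an explicit override error.
    assert!(rx1.recv().unwrap().is_err());
}
```
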
Weny Xu
965a48656f feat(metric-engine): introduce RowModifier for MetricEngine (#5380)
* feat(metric-engine): store physical table ColumnIds in `MetricEngineState`

* feat(metric-engine): introduce `RowModifier` for MetricEngine

* chore: upgrade greptime-proto

* feat: introduce `WriteHint` to `RegionPutRequest`

* chore: apply suggestions from CR

* chore: update greptime-proto

* chore: apply suggestions from CR

* chore: add comments

* chore: update proto
2025-01-22 05:16:44 +00:00
dennis zhuang
4259975be9 feat: support not-equal matcher for PromQL metric names (#5385)
* feat: make instant_query and range_query to supports not-equal matchers

* feat: impl query_metric_names

* feat: forgot some files and refactor

* chore: test and docs

* fix: typo

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* refactor: parse_query

* chore: improve test

* fix: use current catalog to query information_schema

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2025-01-22 03:04:40 +00:00
yihong
d2f3f2e24d fix: vector function for PromQL need to ignore the time index also (#5398)
* fix: vector function for PromQL need to ignore the time index also close #5392

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: do not affect scalar function

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: better name for it

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

---------

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-22 02:51:12 +00:00
Zhenchi
f74a955504 feat: bloom filter as fulltext index v2 (Part 1) (#5406)
* feat: bloom filter as fulltext index v2

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* add unit tests for tokenizer

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor dup vars

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2025-01-21 23:33:11 +00:00
Ruihang Xia
6f1b5101a3 feat(grafana): update cluster dashboard to include flow-related metrics (#5419) 2025-01-21 20:21:01 +08:00
discord9
9f626ec776 chore: better error msg (#5415) 2025-01-21 07:32:32 +00:00
Niwaka
0163ce8df9 feat: add column if not exists (#5393)
* feat: add column if not exists

* chore: address reviews
2025-01-21 02:38:25 +00:00
shuiyisong
2ab235ec9d chore: extract service_name in OTLP traces by default (#5412)
chore: add service_name in traces
2025-01-21 02:34:56 +00:00
Zhenchi
281d9a5920 fix: matches incorrectly uses byte len as char len (#5411)
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2025-01-21 02:34:36 +00:00
Weny Xu
385b1bcbb0 feat(metric-engine): introduce index options from metric engine (#5374)
* feat(metric-engine): introduce index options from metric engine

* chore: fmt toml

* test: add sqlness test

* fix: ignore internal columns

* chore: remove unused dep

* chore: update sqlness result

* chore: ignore metric engine internal columns

* chore: refine code styling

* test: update sqlness test

* refactor: refactor `create_table_constraints`

* test: show index

* chore: apply suggestions from CR

* fix: set inverted index explicitly

* chore: apply suggestions from CR
2025-01-20 08:48:00 +00:00
Yohan Wal
5287d46073 refactor: use MetadataKey for kafka topic (#5351)
* refactor: use MetadataKey

* fix: match all prefix

* refactor: introduce TopicPool

* fix: fix test, some rename

* test: add unit test for legacy restore

* fix: add _ between prefix and topic id

* chore: readable legacy topics

* refactor: a refactor

* Apply suggestions from code review

* Apply suggestions from code review

* refactor: introduce TopicPool

* fix: fix unit test

* chore: fix unit test and add some comments

* fix: fix unit test

* refactor: just refactor

* refactor: rename

* chore: rename, comments and remove unnecessary clone
2025-01-20 07:38:22 +00:00
Lei, HUANG
64ce9d3744 chore(http): change authorization header (#5389)
* chore/change-authorization-header:
 ### Add Custom Authorization Header Support

 - **Files Modified**: `http.rs`, `authorize.rs`, `authorize.rs` (tests)
 - **Key Changes**:
   - Introduced a custom authorization header `x-greptime-auth` in `http.rs`.
   - Updated authorization logic in `authorize.rs` to support both `x-greptime-auth` and the standard `Authorization` header.
   - Enhanced test cases in `authorize.rs` to validate the new custom header functionality.

* chore: add more tests
2025-01-20 07:09:44 +00:00
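
A minimal sketch of the header-selection logic this change describes: prefer the custom `x-greptime-auth` header and fall back to the standard `Authorization` header. The plain `HashMap` with lowercase keys is an assumption for illustration, not the server's actual middleware types.

```rust
use std::collections::HashMap;

/// Pick credentials from the custom header first, then the standard one.
fn extract_credentials(headers: &HashMap<String, String>) -> Option<&str> {
    headers
        .get("x-greptime-auth")
        .or_else(|| headers.get("authorization"))
        .map(String::as_str)
}

fn main() {
    let mut headers = HashMap::new();
    headers.insert("authorization".to_string(), "Basic dXNlcjpwdw==".to_string());
    assert_eq!(extract_credentials(&headers), Some("Basic dXNlcjpwdw=="));

    headers.insert("x-greptime-auth".to_string(), "Basic Z3JlcHRpbWU=".to_string());
    assert_eq!(extract_credentials(&headers), Some("Basic Z3JlcHRpbWU="));
}
```
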
yihong
80790daae0 fix: better sqlness output, replace the unwrap with a better message (#5400)
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-20 04:59:39 +00:00
Ning Sun
5daac5fe3d ci: revert coverage runner (#5403) 2025-01-20 03:52:38 +00:00
Weny Xu
4323c20d18 feat(metric-engine): introduce experimental_sparse_primary_key_encoding to MetricEngineConfig (#5373)
* feat: introduce `experimental_sparse_primary_key_encoding` to `MetricEngineConfig`

* fix: unit test
2025-01-20 03:49:39 +00:00
Ning Sun
f53b6777cc ci: use arm builders for tests (#5395) 2025-01-20 02:12:12 +00:00
discord9
87c21e2baa fix(flow): deal with flow drop leftover (#5391)
* fix: deal with flow drop leftover

* chore: make it warn

* chore: apply suggestion.

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* chore: review

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2025-01-19 12:55:25 +00:00
yihong
d072801ad6 fix: drop unused pub fn found using cargo_workspace_unused (#5352)
* fix: drop unused pub fn found using cargo_workspace_unused

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: seems is_sum can delete too

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

---------

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-18 15:59:21 +00:00
Lei, HUANG
0607b38a20 chore: change default compaction output size limit to 2GB (#5378)
chore/change-default-compaction-output-size-limit:
 ### Update `TwcsOptions` Default Configuration

 - Modified the default value of `max_output_file_size` in `TwcsOptions` to `Some(ReadableSize::gb(2))` in `src/mito2/src/region/options.rs`.
2025-01-18 15:11:36 +00:00
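
A hedged illustration of what such a default looks like; `TwcsOptions` below is a stand-in struct, not the real one in `src/mito2/src/region/options.rs`, and the byte count simply spells out 2 GiB.

```rust
/// Stand-in for the compaction options struct described above (illustrative).
#[derive(Debug)]
struct TwcsOptions {
    /// Maximum size of a single compaction output file, in bytes.
    max_output_file_size: Option<u64>,
}

impl Default for TwcsOptions {
    fn default() -> Self {
        Self {
            // 2 GiB cap by default; `None` would mean "no limit".
            max_output_file_size: Some(2 * 1024 * 1024 * 1024),
        }
    }
}

fn main() {
    let opts = TwcsOptions::default();
    assert_eq!(opts.max_output_file_size, Some(2 * 1024 * 1024 * 1024));
}
```
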
Yingwen
e0384a7d46 feat: overwrites inferred compaction window by region options (#5396)
* feat: use time window in compaction options for compaction window

* test: add tests for overwriting options

* chore: typo

* chore: fix a grammar issue in log
2025-01-18 14:53:56 +00:00
Ning Sun
d73815ba84 feat: pipeline dispatch part 1, add definition (#5359)
* feat: add dispatcher definition

* feat: add dispatcher element in pipeline definition

* fmt: correct format

* test: add negative tests

* fmt: fix format

* refactor: replace consts

* feat: add tostring for dispatcher

* refactor: remove to_string which is actually debug

* Update src/pipeline/src/dispatcher.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2025-01-18 11:47:17 +00:00
yihong
c78a492863 refactor: drop useless clone for some better performance using static check (#5388)
refactor: drop useless clone for some better performance using static check

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-17 13:50:37 +00:00
Yingwen
859717c309 ci: always build standard db (#5390)
ci: remove dev-mode = false in building standard db
2025-01-17 13:10:46 +00:00
yihong
52697a9e66 fix: maybe double free from static str in Snafu (#5383)
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
Co-authored-by: Weny Xu <wenymedia@gmail.com>
2025-01-17 08:24:26 +00:00
discord9
f8d26b433e fix: make flow worker actually run in parallel (#5384)
* fix: make flow worker actually run in parallel

* chore: check for underflow

* fix: del duplicate sub

* fix: print server handle error
2025-01-16 14:32:33 +00:00
Zhenchi
1acfb6ed1c feat!: use indirect indices for bloom filter to reduce size (#5377)
* feat!(bloom-filter): use indirect indices to reduce size

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix format

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* update proto

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* nit

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* upgrade proto

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2025-01-16 13:18:29 +00:00
Yingwen
7eaabb3ca2 fix: increase in progress scan gauge and adjust histogram buckets (#5370)
* fix: in progress scan doesn't inc

* feat(mito): adjust mito histogram buckets

* chore(metric-engine): adjust metric engine histogram bucket
2025-01-16 12:53:03 +00:00
Yingwen
3a55f5d17c test: fix config api test (#5386)
put content_cache_page_size to correct place
2025-01-16 12:28:31 +00:00
Ruihang Xia
8d5d4000e6 feat: set default compaction parallelism (#5371)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-01-16 11:16:56 +00:00
Ruihang Xia
a598008ec3 fix: panic when received invalid query string (#5366)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-01-16 11:05:46 +00:00
Ruihang Xia
86bd54194a feat: digest pipeline processor (#5323)
* feat: basic impl

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add document

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* apply code review comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Apply suggestions from code review

* follow the naming master

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-01-16 09:46:37 +00:00
Weny Xu
ccd2b06b7a fix!: correct index cache config (#5381)
* fix: correct index cache config

* chore: update config.md
2025-01-16 09:46:02 +00:00
Yohan Wal
0db10a33d0 chore: update proto rev (#5379) 2025-01-16 08:06:28 +00:00
discord9
317fe9eaa5 feat: flow's http server (#5372)
* feat: flow's http server

* feat: add cli options for http addr

* test: sqlness runner http addr

* feat: metrics

* chore: also shutdown http server
2025-01-16 07:25:30 +00:00
Lanqing Yang
a4761d6245 feat: Alter inverted index (#5131)
feat: support alter inverted index.
2025-01-16 07:09:27 +00:00
discord9
758aef39d8 feat: filter batch by sequence in memtable (#5367)
* feat: add seq field

* feat: filter by sequence

* chore: per review

* docs: explain why not prune

* chore: correct doc

* test: test filter by seq
2025-01-16 04:44:28 +00:00
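
A small sketch of the sequence-based filtering idea in the commit above: rows written after the reader's snapshot sequence are skipped, so a scan only sees data committed when it started. The row type is a simplified stand-in for the memtable's real batch structures.

```rust
/// Simplified row with a write sequence number (illustrative).
#[derive(Debug, PartialEq)]
struct Row {
    key: &'static str,
    sequence: u64,
}

/// Keep only rows visible at `snapshot_seq`.
fn filter_by_sequence(batch: Vec<Row>, snapshot_seq: u64) -> Vec<Row> {
    batch
        .into_iter()
        .filter(|row| row.sequence <= snapshot_seq)
        .collect()
}

fn main() {
    let batch = vec![
        Row { key: "a", sequence: 10 },
        Row { key: "b", sequence: 42 },
    ];
    let visible = filter_by_sequence(batch, 20);
    assert_eq!(visible, vec![Row { key: "a", sequence: 10 }]);
}
```
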
ZonaHe
4e3dd04f42 feat: update dashboard to v0.7.9-rc.1 (#5368)
feat: update dashboard to v0.7.9-rc

Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2025-01-16 03:43:38 +00:00
Weny Xu
18b77408ae feat: introduce SparsePrimaryKeyCodec and SparsePrimaryKeyFilter (#5365)
* feat(mito): introduce `SparseRowCodec`

* feat(mito): introduce `SparsePrimaryKeyFilter`

* chore: apply suggestions from CR
2025-01-16 03:10:43 +00:00
Ning Sun
725d5a9e68 fix: redirect /dashboard to /dashboard/ (#5369)
* fix: redirect /dashboard to /dashboard/

* test: update integration test
2025-01-16 03:04:08 +00:00
Ruihang Xia
4f29e50ef3 feat: refine log query AST (#5316)
* draft

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl planner part

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: tweak aggr func

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* todo about context

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* rename log expr

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* sign todo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-01-15 12:11:24 +00:00
Ning Sun
121ec7936f ci: do not collect coverage from pull request any more (#5364)
* ci: do not collect coverage from pull request any more

* fix: disable toolchain cache
2025-01-15 09:33:49 +00:00
discord9
0185a65905 feat(flow): refill flow task def(Part 2) (#5317)
* feat: refill task def

* chore: per review

* chore: after rebase
2025-01-15 08:48:54 +00:00
yihong
f0d30a0f26 fix: better makefile help show, show fuzz and fuzz-ls also (#5363)
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-15 08:17:23 +00:00
yihong
7e61d1ae27 feat: support pg_database for DBeaver. (#5362)
This patch supports pg_database in pg_catalog and also adds a query replacement
in fixtures.rs, because DataFusion does not support SQL like
'select 1,1;'. See issue #5344 for more details.

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-15 07:05:34 +00:00
yihong
e56dd20426 fix: panic and interval when do not have keyword interval (#5339)
* fix: panic and interval when do not have keyword

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: wrong pos...

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: address comments drop the unreachable

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: address comments and add sqlness tests

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

---------

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-15 06:42:24 +00:00
Weny Xu
b64c075cdb feat: introduce PrimaryKeyEncoding (#5312)
* feat: introduce `PrimaryKeyEncoding`

* fix: fix unit tests

* chore: add empty line

* test: add unit tests

* chore: fmt code

* refactor: introduce new codec trait to support various encoding

* fix: fix unit tests

* chore: update sqlness result

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2025-01-15 06:16:53 +00:00
Weny Xu
57f8afcb70 chore: avoid sending create table requests for already existing tables (#5347)
* chore: avoid sending create table requests for already existing tables

* chore: apply suggestions from CR

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2025-01-15 02:50:25 +00:00
Ruihang Xia
bd37e086c2 fix: improve error handling in pipeline and log in log store (#5357)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-01-14 13:58:30 +00:00
discord9
66f63ae981 feat: more than one flow workers (#5315)
* feat: more workers

* feat: use round robin

* refactor: per review

* refactor: per bot review

* chore: per review

* docs: example

* docs: update config.md

* docs: update

* chore: per review

* refactor: set workers to cpu/2.max(1)

* fix: flow config in standalone mode

* test: fix config test

* docs: update docs&opt name

* chore: update config.md

* refactor: per review, sanitize at top

* chore: per review

* chore: config.md
2025-01-14 12:28:18 +00:00
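
A rough sketch of the sizing and dispatch idea in the commit above, assuming the documented default of half the CPUs (at least one worker) and plain round-robin assignment; this is not the flow engine's actual scheduler code.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread::available_parallelism;

/// Default worker count: half the CPUs, but at least one.
fn default_flow_workers() -> usize {
    let cpus = available_parallelism().map(|n| n.get()).unwrap_or(1);
    (cpus / 2).max(1)
}

/// Round-robin picker over a fixed number of workers.
struct RoundRobin {
    next: AtomicUsize,
    workers: usize,
}

impl RoundRobin {
    fn pick(&self) -> usize {
        self.next.fetch_add(1, Ordering::Relaxed) % self.workers
    }
}

fn main() {
    let rr = RoundRobin { next: AtomicUsize::new(0), workers: default_flow_workers() };
    let first = rr.pick();
    let second = rr.pick();
    println!("workers = {}, picks = {first}, {second}", rr.workers);
}
```
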
discord9
95b20592ac fix: handle insert default value (#5307)
* fix: handle flow inserts with default values

* test: sqlness

* chore: typo

* chore: newline

* feat(WIP): impure default filler

* feat: fill impure default values

* test: add test for default fill impure

* feat: check for impure

* fix: also handle stmt to region

* refactor: per review

* refactor: per review

* chore: rebase fix

* chore: clippy

* chore: per review
2025-01-14 09:06:53 +00:00
ZonaHe
1855dccdf1 feat: update dashboard to v0.7.8 (#5355)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2025-01-14 09:00:08 +00:00
Yiran
5efcb41310 ci: automatically bump doc version when release GreptimeDB (#5343)
* ci: automatically bump doc version when release GreptimeDB

* add license header
2025-01-14 08:20:58 +00:00
Ning Sun
f5829364a2 fix: security fix, sqlx, hashbrown, idna and CI updates (#5330)
* fix: security fix, sqlx, hashbrown, idna

* ci: optimize ci cache generation

* feat: update pprof
2025-01-14 08:19:33 +00:00
Xuanwo
87bd12d6df refactor: Bump opendal to 0.51.1 (#5354)
* refactor: Bump opendal to 0.51.1

Signed-off-by: Xuanwo <github@xuanwo.io>

* Ignore dirs from cache

Signed-off-by: Xuanwo <github@xuanwo.io>

* Reduce extra alloc

Signed-off-by: Xuanwo <github@xuanwo.io>

---------

Signed-off-by: Xuanwo <github@xuanwo.io>
2025-01-14 07:28:09 +00:00
ZonaHe
c370b4b40d feat: update dashboard to v0.7.7 (#5350)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2025-01-13 07:35:12 +00:00
zyy17
3f01f67f94 refactor(elasticsearch): use _index as greptimedb table in log ingestion and add /${index}/_bulk API (#5335)
* refactor(elasticsearch): use `_index` as greptimedb table in log ingestion and add `/${index}/_bulk` API

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* refactor: code review

---------

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2025-01-13 06:58:04 +00:00
Weny Xu
6eb746d994 fix: skip building indexer when indexed_column_ids are empty (#5348) 2025-01-13 05:15:20 +00:00
yihong
03a144fa56 chore: drop useless import raw_normalize_path in object-store lib (#5349)
chore: drop useless import raw_normalize_path in store lib

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-13 04:08:36 +00:00
Yohan Wal
f069ea082f fix(test): use different table name (#5334) 2025-01-10 17:09:23 +00:00
ZonaHe
9ae48010f0 feat: update dashboard to v0.7.6 (#5340)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2025-01-10 17:06:09 +00:00
yihong
3a996c2f00 feat: add set search_path to 'xxx' for pg (#5342)
* feat: add set search_path to 'xxx' for pg

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: address comments

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

---------

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-10 17:05:10 +00:00
ZonaHe
45d4065fd6 feat: update dashboard to v0.7.4 (#5336)
Co-authored-by: sunchanglong <sunchanglong@users.noreply.github.com>
2025-01-10 08:37:02 +00:00
discord9
9e09be7ba6 chore: update rustls (#5337)
* deps: update rustls

* chore: update lock

* chore: server use workspace rustls

* chore: comment on version
2025-01-10 08:17:40 +00:00
Niwaka
50583815de feat: support alter add multiple columns (#5262)
* feat: support alter add multiple columns

* fix: address review

* chore: add column format
2025-01-10 06:14:17 +00:00
yihong
24ea9cf215 feat: add show search_path for pg (#5328)
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-10 04:04:05 +00:00
yihong
78d0fa75c9 fix: android build (#5329)
* fix: android build

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: better fix address comments

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

---------

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-09 14:47:29 +00:00
Ning Sun
0685ba265c ci: disable cache for some tasks, create cache in nightly build (#5324)
* ci: disable cache for some tasks

* ci: add a nightly test to create rust cache on main
2025-01-09 11:14:13 +00:00
Weny Xu
be22da775a build: disable local IP detection feature in Android binary (#5327)
build: disable local ip detection feature in android binary
2025-01-09 11:13:48 +00:00
yihong
d33309be2b fix: drop all python embedding code for docker and doc (#5325)
* fix: drop all python embedding code for docker and doc

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: address comments drop the left python

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

---------

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-09 10:20:26 +00:00
Lin Yihai
fdbfebf4be feat: Add VEC_PRODUCT, VEC_ELEM_PRODUCT, VEC_NORM. (#5303)
* feat: Add `vec_product(col)` function.

* feat: Add `vec_elem_product` function

* feat: Add `vec_norm` function.
2025-01-09 06:26:51 +00:00
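
For orientation, two of the functions named above sketched as plain slice math rather than the engine's real UDF machinery: `vec_elem_product` multiplies the elements of one vector together and `vec_norm` is the L2 norm. The exact semantics of `vec_product` (presumably aggregating element-wise over rows) are not shown, and the signatures below are assumptions.

```rust
/// Product of a vector's own elements (illustrative).
fn vec_elem_product(v: &[f64]) -> f64 {
    v.iter().product()
}

/// L2 norm of a vector (illustrative).
fn vec_norm(v: &[f64]) -> f64 {
    v.iter().map(|x| x * x).sum::<f64>().sqrt()
}

fn main() {
    assert_eq!(vec_elem_product(&[2.0, 3.0, 4.0]), 24.0);
    assert_eq!(vec_norm(&[3.0, 4.0]), 5.0);
}
```
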
yihong
812a775b3d ci: drop useless deadsnake (#5311)
fix: drop useless deadsnake

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-08 14:56:10 +00:00
Yohan Wal
751fa4ede9 feat(config): make table name of pg backend configurable (#5244)
* feat: configurable table name

* fix: election sql

* feat: configurable lock_id

* chore: update config file

* perf: useless allocation

* perf: useless allocation

* chore: remove unused type hint

* Apply suggestions from code review

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* chore: update config file

---------

Co-authored-by: Weny Xu <wenymedia@gmail.com>
2025-01-08 14:55:35 +00:00
zyy17
03a2e6d0c1 feat: support elasticsearch _bulk API to ingest logs (#5261)
* feat: support elasticsearch '_bulk' API to ingest logs

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* refactor: code review

* refactor: add metrics

---------

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2025-01-08 14:54:53 +00:00
Ning Sun
815ce59a3a ci: use mold for tests (#5319)
* ci: use mold for tests

* ci: enable rust cache saving for merge group
2025-01-08 12:41:04 +00:00
Yingwen
c19a56c79f feat!: Remove script crate and python feature (#5321)
* feat: exclude script crate

* chore: simplify feature

* feat: remove the script crate

* chore: remove python feature and some comments

* chore: fix warning
2025-01-08 12:11:53 +00:00
Weny Xu
7f307a4cac feat: auto detecting local IP to use as hostname (#5314)
* feat: auto detecting hostname

* chore: update config.md

* chore: bring back hostname

* chore: apply suggestions from CR

* chore: use const

* chore: apply suggestions from CR

* fix: fix sqlness
2025-01-08 12:06:31 +00:00
yihong
52eebfce77 feat: support select session_user; (#5313)
* feat: support `select session_user;`

This commit is part of supporting DBeaver, which uses the function
`select session_user` like PostgreSQL does.

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: lint problem

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: address comments add tests

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

---------

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-08 09:44:54 +00:00
Ning Sun
e18416a726 ci: do not trigger tests when there is a merge conflict (#5318)
* ci: do not trigger tests when there is a merge conflict

* Update .github/workflows/develop.yml

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

* ci: disable cache from rust toolchain action

---------

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2025-01-08 09:02:49 +00:00
shuiyisong
d1f8ea7880 perf: parse Loki labels in protobuf write path (#5305)
* chore: parse loki labels

* chore: add bench

* chore: add comment

* chore: add test

* chore: remove unnecessary default value and update test

* fix: typo and test

* chore: cr issue

* chore: cr issue
2025-01-08 07:55:06 +00:00
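
A hypothetical sketch of the kind of label parsing this write path performs, assuming the textual `{key="value", ...}` label form and ignoring quoted commas and escape sequences, which a real parser has to handle; it is not the actual GreptimeDB implementation.

```rust
/// Parse a Loki label string such as `{job="app", env="prod"}` into pairs.
/// Naive: does not handle commas or escaped quotes inside values.
fn parse_loki_labels(labels: &str) -> Option<Vec<(String, String)>> {
    let inner = labels.trim().strip_prefix('{')?.strip_suffix('}')?;
    let mut pairs = Vec::new();
    for part in inner.split(',') {
        let part = part.trim();
        if part.is_empty() {
            continue;
        }
        let (key, value) = part.split_once('=')?;
        let value = value.trim().strip_prefix('"')?.strip_suffix('"')?;
        pairs.push((key.trim().to_string(), value.to_string()));
    }
    Some(pairs)
}

fn main() {
    let parsed = parse_loki_labels(r#"{job="app", env="prod"}"#).unwrap();
    assert_eq!(parsed[0], ("job".to_string(), "app".to_string()));
    assert_eq!(parsed[1], ("env".to_string(), "prod".to_string()));
}
```
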
LFC
2cd1b08ff7 refactor: optimize out partition split insert requests (#5298)
* test: optimize out partition split insert requests if there is only one region

* Now that the optimization for single-region inserts has been lifted up, the original "fast path" can be retired.

* resolve PR comments
2025-01-08 07:26:29 +00:00
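
A sketch of the single-region shortcut this commit lifts up: when a table has only one region, per-row partition routing is skipped and the whole batch goes to that region. The placeholder routing below stands in for real partition-rule evaluation and is purely illustrative.

```rust
use std::collections::HashMap;

type RegionId = u64;

/// Split an insert batch across regions, with a fast path for a single region.
fn split_insert(rows: Vec<String>, regions: &[RegionId]) -> HashMap<RegionId, Vec<String>> {
    let mut result = HashMap::new();
    if let [only] = regions {
        // Fast path: no partition rule evaluation needed.
        result.insert(*only, rows);
        return result;
    }
    for row in rows {
        // Placeholder routing: a real implementation evaluates partition rules.
        let region = regions[row.len() % regions.len()];
        result.entry(region).or_insert_with(Vec::new).push(row);
    }
    result
}

fn main() {
    let split = split_insert(vec!["a".into(), "bb".into()], &[42]);
    assert_eq!(split.get(&42).map(|rows| rows.len()), Some(2));
}
```
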
discord9
0ee41339aa feat(flow): flow refill state (Part 1) (#5295)
* feat(flow): (Part 1) refill utils

* chore: after rebase fix

* chore: more rebase

* rm refill.rs to reduce pr size

* chore: simpler args

* refactor: per review

* docs: more explain for instant requests

* refactor: per review
2025-01-08 06:45:56 +00:00
yihong
369b59c84a fix: drop unused deps using udeps to minimize the size (#5301)
* fix: drop unused deps using udeps to minimize the size

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: address comments and fix the problem

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

---------

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-08 06:36:22 +00:00
Zhenchi
c305b2b406 fix: show index to display skipping index (#5297)
* fix: show index to display skipping index

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix sqlness

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix sqlness

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2025-01-08 04:59:40 +00:00
Weny Xu
c89ef85902 chore(nightly-ci): remove pg_kvbackend feature gate in windows ci (#5310) 2025-01-08 03:53:58 +00:00
Weny Xu
3d9df822ad refactor: refactor PgStore (#5309)
* refactor: refactor PgStore

* fix: election use bytea and txn use Serializable to avoid unrepeatable reads (#4)

* fix: election use bytea as well

* fix: use Serializable to avoid unrepeatable reads

* chore: remove unused error

* ci: enable pg kvbackend and sqlness

* ci: switch on pg_kvbackend feature

* fix: fix sqlness runner

* chore: add pg_kvbackend feature gate

* build(ci): add feature gate

* fix: add retry for `PgStore` txn

* fix: correct `SET_IDLE_SESSION_TIMEOUT`

---------

Co-authored-by: Yohan Wal <1035325592@qq.com>
Co-authored-by: CookiePieWw <profsyb@gmail.com>
2025-01-07 07:27:58 +00:00
Lei, HUANG
bc2f05d949 docs: Added C/C++ building essentials to the prerequisites list (#5302)
Added C/C++ building essentials to the prerequisites list
2025-01-07 03:10:56 +00:00
Weny Xu
05f115e047 fix(pg_backend): correct set idle_in_transaction_session_timeout statement (#5304)
* fix(metasrv): correct `backend` field configuration

* refactor!: added `#[serde(rename_all = "snake_case")]` macro to the `BackendImpl` enum

* fix(metasrv): correct `set idle_in_transaction_session_timeout` statement

* build: enable `pg_backend` by default
2025-01-06 12:07:35 +00:00
Zhenchi
5cf9d7b6ca fix(bloom-filter): filter rows with segment precision (#5286)
* fix(bloom-filter): filter rows with segment precision

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* add case

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address TODO

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2025-01-06 11:45:15 +00:00
Ruihang Xia
a1cd194d0c feat: update standalone grafana with new metric name (#5278)
* feat: update standalone grafana with new metric name

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Revert "feat: update standalone grafana with new metric name"

This reverts commit 7af38d52ac.

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* redo change

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change the cluster version

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Apply suggestions from code review

* change version

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-01-06 09:25:49 +00:00
shuiyisong
a56c430db0 fix: pass unknown encoding in decompression layer (#5300)
fix: allow passing unknown encodings in the decompression layer
2025-01-06 09:10:56 +00:00
discord9
6a1ec8db25 chore: cleanup dockerfile (#5299)
chore: cleanup
2025-01-06 09:06:10 +00:00
Ning Sun
04708f10aa feat(pipeline): allow coerce timestamp from integer values (#5270)
* feat: allow coerce timestamp from integer values

* test: add tests for parsing integer values
2025-01-06 08:53:09 +00:00
Ning Sun
ddf36c8324 ci: disable docker/rust cache temporarily and merge docker compose files (#5293)
* ci: disable docker cache temporarily and merge docker compose files

* ci: fix compose file name and options

* ci: try to disable rust cache
2025-01-06 08:23:46 +00:00
Weny Xu
96b2a5fb28 feat: introduce ParallelFstValuesMapper (#5276)
* refactor: `RangeReader` to use `&self`

* refactor: `InvertedIndexReader` to use `&self`

* refactor: refactor: `BloomFilterReader` to use `&self`

* feat: introduce `ParallelFstValuesMapper`

* chore: change prefetch size to 8KiB

* chore: add `file_size_hint` for cached blob reader

* chore: fix clippy

* refactor: remove `FstValuesMapper`

* chore: apply suggestions from CR
2025-01-06 07:33:35 +00:00
shuiyisong
bbbba29afc feat: support Loki JSON write (#5288)
* perf: small updates

* refactor: move loki to a separate file

* chore: extract content_type pattern matching

* chore: minor update

* feat: loki json write

* chore: add decompression http layer

* fix: label string value instead of to_string

* chore: add test

* fix: typo

* fix: license header

* chore: rename
2025-01-06 07:09:48 +00:00
Weny Xu
b229c94fba fix(fuzz): ensure all regions leases are renewed (#5294)
* fix(fuzz): ensure all regions leases are renewed

* fix: fix clippy
2025-01-06 06:21:41 +00:00
Kould
2ad50332cb feat: impl COPY a query resultset to external file (#5250)
* feat: impl COPY a query resultset to external file

* chore: add more tests for parse `copy_table_to`

* chore: add more tests for parse `copy_table_to`
2025-01-06 04:32:51 +00:00
Yohan Wal
513569ed5d feat: add Txn for pg kv backend (#5266)
* feat: txn for pg kv backend

* chore: clippy

* fix: txn uses one client

* test: clean up and txn test

* test: clean up

* test: change lock_id to avoid conflict in test

* test: use different prefix in pg election test

* fix(test): just a fix

* test: aggregate multiple test to avoid concurrency problem

* test: use uuid instead of rng

* perf: batch cmp in txn

* perf: batch same op in txn
2025-01-06 03:29:09 +00:00
Yingwen
69d9a2845f docs: update standalone example (#5290) 2025-01-04 11:30:25 +00:00
Kould
1067357b72 chore(config)!: refactor configs of write cache (#5259)
* chore: refactor configs of write cache

* chore: write_cache_size `10GiB` -> `5GiB`
2025-01-04 07:14:38 +00:00
Ning Sun
2caf003db0 fix: brings back functions required by downstream projects (#5283) 2025-01-04 03:49:01 +00:00
Yingwen
9bf9aa1082 chore: update greptime-proto to include add_if_not_exists (#5289) 2025-01-03 15:41:00 +00:00
discord9
353c8230db refactor: flow replace check&better error msg (#5277)
* chore: better error msg

* chore eof newline

* refactor: move replace check to flow worker

* chore: add ctx to insert flow failure

* chore: Update src/flow/src/adapter/flownode_impl.rs

* test: add order by for deterministic

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2025-01-03 10:59:58 +00:00
Lei, HUANG
577d81f14c chore: suppress list warning (#5280)
chore/suppress-list-warning:
 ### Update logging level in `intermediate.rs`

 - Changed logging level from `warn` to `debug` for unexpected directory entries in index creation.
 - Added `debug` to the `common_telemetry` import to support the logging level change.
2025-01-03 09:05:03 +00:00
yihong
856bba5d95 fix: better fmt check from 40s to 4s (#5279)
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-01-03 08:12:49 +00:00
Yingwen
89399131dd feat: support add if not exists in the gRPC alter kind (#5273)
* test: test adding existing columns

* chore: add more checks to AlterKind

* chore: update logs

* fix: check and build table info first

* feat: Add add_if_not_exists flag to alter expr

* feat: skip existing columns when building alter kind

* checks in make_region_alter_kind()
* reuse the alter kind

* test: fix tests in common-meta

* chore: fix typos

* chore: update comments
2025-01-03 07:23:17 +00:00
discord9
d20b592fe8 fix: flow handle reordered inserts (#5275)
* fix: reorder correct schema

* tests: reorder insert handled correctly

* chore: rm unused

* refactor: per review

* chore: more comment

* chore: per review
2025-01-03 06:25:39 +00:00
Yohan Wal
bcb0f14227 refactor: adjust index cache page size (#5267)
* refactor: adjust index cache page size

* fix: wrong docs

* Update config/datanode.example.toml

* Update config/config.md

* Update config/config.md

* chore: adjust to 64KiB

* Apply suggestions from code review
2025-01-03 03:26:17 +00:00
Ning Sun
3b27adb3fe ci: update nix setup (#5272) 2025-01-03 03:13:04 +00:00
discord9
4d6fe31fff fix(flow): flow's table schema cache (#5251)
* fix: flow schema cache

* refactor: location for `to_meta_err`

* chore: endfile emptyline

* chore: review(partially)

* chore: per review

* refactor: per review

* refactor: per review
2025-01-02 10:33:23 +00:00
Yohan Wal
1b0b9add90 feat: use connection pool for pg kv backend in preparation for txn (#5260)
* feat: use connection pool

* chore: follow review comments

* fix: create table before test
2025-01-02 06:33:21 +00:00
chenmortal
2b89970d45 fix: import tokio-metrics and tokio-metrics-collector (#5264) 2025-01-02 05:58:31 +00:00
Ning Sun
53d006292d fix: correct invalid testing feature gate usage (#5258)
* fix: correct invalid testing feature gate usage

* test: refactor tests to avoid test code leak

* fix: sync main
2025-01-02 03:22:54 +00:00
discord9
d18c8b5e16 chore: typo (#5265)
* fix: a typo

* chore: even more typos
2025-01-02 03:17:53 +00:00
jeremyhi
e0949c4a11 feat: hints all in one (#5194)
* feat: hints all in one

* chore: If hints are provided in the x-greptime-hints header, ignore the rest of the headers
2025-01-02 02:56:33 +00:00
Ruihang Xia
5cf931c417 feat(log-query): implement pagination with limit and offset parameters (#5241)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-12-31 06:42:46 +00:00
Ning Sun
cc5b1d42b0 ci: disable pyo3 build tasks (#5256)
* ci: disable pyo3 build tasks

* ci: skip installing python for windows

* ci: also removed python dependencies from docker base image
2024-12-31 04:53:41 +00:00
Ruihang Xia
55b7656956 feat: override __sequence on creating SST to save space and CPU (#5252)
* override memtable sequence

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* override sst sequence

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: changes per CR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use correct sequence number

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* wrap a method to get max sequence

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-12-31 03:28:02 +00:00
Yingwen
75e4f307c9 feat: update partition duration of memtable using compaction window (#5197)
* feat: update partition duration of memtable using compaction window

* chore: only use provided duration if it is not None

* test: more tests

* test: test compaction apply window

* style: fix clippy
2024-12-30 13:06:25 +00:00
Yohan Wal
89f2e15ffb feat: add election logic for PgElection (#5249)
* feat: init PgElection

* fix: release advisory lock

* fix: handle duplicate keys

* chore: update comments

* fix: unlock if acquired the lock

* chore: add TODO and avoid unwrap

* refactor: check both lock and expire time, add more comments

* test: add unit test for pg election

* chore: fmt

* chore: typo

* fix: add feature gate

* chore: visibility

* chore: follow review comments
2024-12-30 09:45:04 +00:00
zyy17
13ed10556a refactor: support to convert time string to timestamp in convert_value() (#5242)
refactor: support to convert time string to timestamp in convert_value()
2024-12-30 08:36:08 +00:00
Lin Yihai
d1108ab581 feat: add vec_div function (#5245) 2024-12-30 07:08:26 +00:00
Ning Sun
1287d4cb9f ci: make sure clippy passes before running tests (#5253)
* ci: make sure clippy passes before running tests

* ci: do not run ci on main branch
2024-12-30 07:01:15 +00:00
Zhenchi
109fe04d17 fix(bloom-filter): skip applying for non-indexed columns (#5246)
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-12-30 06:56:58 +00:00
Yingwen
f1eb76f489 fix: implement a CacheStrategy to ensure compaction use cache correctly (#5254)
* feat: impl CacheStrategy

* refactor: replace Option<CacheManagerRef> with CacheStrategy

* feat: add disabled strategy

* ci: force update taplo

* refactor: rename CacheStrategy::Normal to CacheStrategy::EnableAll

* ci: force install cargo-gc-bin

* ci: force install

* chore: use CacheStrategy::Disabled as ScanInput default

* chore: fix compiler errors
2024-12-30 06:24:53 +00:00
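
A hypothetical sketch of the strategy enum this refactor introduces, replacing `Option<CacheManagerRef>` so call sites state intent explicitly. The variant names follow the commit messages above; the cache manager type is a stand-in, not the real mito2 struct.

```rust
use std::sync::Arc;

/// Stand-in for the real cache manager (illustrative).
struct CacheManager;

impl CacheManager {
    fn get(&self, _key: &str) -> Option<String> {
        None
    }
}

#[derive(Clone)]
enum CacheStrategy {
    /// Normal reads: use every cache layer.
    EnableAll(Arc<CacheManager>),
    /// Compaction or tests: bypass caches entirely.
    Disabled,
}

impl CacheStrategy {
    fn get(&self, key: &str) -> Option<String> {
        match self {
            CacheStrategy::EnableAll(manager) => manager.get(key),
            CacheStrategy::Disabled => None,
        }
    }
}

fn main() {
    let strategy = CacheStrategy::EnableAll(Arc::new(CacheManager));
    assert_eq!(strategy.get("sst-page-0"), None);
    assert!(CacheStrategy::Disabled.get("sst-page-0").is_none());
}
```
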
Ruihang Xia
11bab0c47c feat: add sqlness test for bloom filter index (#5240)
* feat: add sqlness test for bloom filter index

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* drop table after finished

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* redact more variables

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-12-27 06:40:18 +00:00
shuiyisong
588f6755f0 fix: disable path label in opendal for now (#5247)
* fix: remove path label in opendal for now

* fix: typo

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-12-27 04:34:19 +00:00
Kould
dad8ac6f71 feat(vector): add vector functions vec_sub & vec_sum & vec_elem_sum (#5230)
* feat(vector): add sub function

* chore: added check for vector length misalignment

* feat(vector): add `vec_sum` & `vec_elem_sum`

* chore: codefmt

* update lock file

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-12-26 15:07:13 +00:00
Yohan Wal
ef13c52814 feat: init PgElection with candidate registration (#5209)
* feat: init PgElection

fix: release advisory lock

fix: handle duplicate keys

chore: update comments

fix: unlock if acquired the lock

chore: add TODO and avoid unwrap

refactor: check both lock and expire time, add more comments

chore: fmt

fix: deal with multiple edge cases

feat: init PgElection with candidate registration

chore: fmt

chore: remove

* test: add unit test for pg candidate registration

* test: add unit test for pg candidate registration

* chore: update pg env

* chore: make ci happy

* fix: spawn a background connection thread

* chore: typo

* fix: shadow the election client for now

* fix: fix ci

* chore: readability

* chore: follow review comments

* refactor: use kvbackend for pg election

* chore: rename

* chore: make clippy happy

* refactor: use pg server time instead of local ones

* chore: typo

* chore: rename infancy to leader_infancy for clarification

* chore: clean up

* chore: follow review comments

* chore: follow review comments

* ci: unit test should test all features

* ci: fix

* ci: just test pg
2024-12-26 12:39:32 +00:00
Zhenchi
7471f55c2e feat(mito): add bloom filter read metrics (#5239)
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-12-26 04:44:03 +00:00
Zhenchi
f4b2d393be feat(config): add bloom filter config (#5237)
* feat(bloom-filter): integrate indexer with mito2

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat(config) add bloom filter config

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix docs

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix docs

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* merge

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* remove cache config

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-12-26 04:38:45 +00:00
localhost
0cf44e1e47 chore: add more info for pipeline dryrun API (#5232) 2024-12-26 03:06:25 +00:00
Ruihang Xia
00ad27dd2e feat(bloom-filter): bloom filter applier (#5220)
* wip

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* draft search logic

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use defined BloomFilterReader

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* round the range end

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* finish index applier

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* integrate applier into mito2 with cache layer

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix cache key and add unit test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* provide bloom filter index size hint

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* revert BloomFilterReaderImpl::read_vec

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove dead code

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* ignore null on eq

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add more tests and fix bloom filter logic

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-12-26 02:51:18 +00:00
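The value of an applier like this comes from the bloom filter's one-sided answer: "definitely not present" lets a whole range be skipped, while "might be present" keeps it. A small generic sketch of that pruning step (a `HashSet` stands in for the real filter so the example stays self-contained; this is not the mito2 API):

```rust
use std::collections::HashSet;

/// Stand-in for a per-range bloom filter. A real bloom filter can return
/// false positives but never false negatives, which is what makes pruning safe.
struct RangeFilterSketch {
    values: HashSet<String>,
}

impl RangeFilterSketch {
    fn might_contain(&self, probe: &str) -> bool {
        self.values.contains(probe)
    }
}

/// Keep only the ranges that might contain the probed value; the rest are
/// skipped without reading their data.
fn prune_ranges(filters: &[(usize, RangeFilterSketch)], probe: &str) -> Vec<usize> {
    filters
        .iter()
        .filter(|(_, f)| f.might_contain(probe))
        .map(|(range_id, _)| *range_id)
        .collect()
}

fn main() {
    let filters = vec![
        (0, RangeFilterSketch { values: ["a", "b"].iter().map(|s| s.to_string()).collect() }),
        (1, RangeFilterSketch { values: ["c"].iter().map(|s| s.to_string()).collect() }),
    ];
    // Only range 1 might contain "c"; range 0 is pruned from the scan.
    assert_eq!(prune_ranges(&filters, "c"), vec![1]);
}
```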
discord9
5ba8bd09fb fix: flow compare null values (#5234)
* fix: flow compare null values

* fix: fix again ck ty before cmp

* chore: rm comment

* fix: handle null

* chore: typo

* docs: update comment

* refactor: per review

* tests: more sqlness

* tests: sqlness not show create table
2024-12-25 15:31:27 +00:00
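The underlying issue is that a comparison in the flow engine has to look at nullability (and, per the "ck ty before cmp" message, the value types) before comparing. A hedged sketch of null-aware comparison using `Option` as the nullable stand-in; treating NULL as ordered before any value is an assumption of this sketch, not necessarily the flow engine's choice:

```rust
use std::cmp::Ordering;

/// Illustrative null-aware comparison: two NULLs compare equal, a NULL sorts
/// before any non-NULL value, and only two non-NULL values are compared directly.
fn cmp_nullable<T: Ord>(a: &Option<T>, b: &Option<T>) -> Ordering {
    match (a, b) {
        (None, None) => Ordering::Equal,
        (None, Some(_)) => Ordering::Less,
        (Some(_), None) => Ordering::Greater,
        (Some(x), Some(y)) => x.cmp(y),
    }
}

fn main() {
    assert_eq!(cmp_nullable(&Some(1), &None::<i32>), Ordering::Greater);
    assert_eq!(cmp_nullable(&None::<i32>, &None::<i32>), Ordering::Equal);
    assert_eq!(cmp_nullable(&Some(1), &Some(2)), Ordering::Less);
}
```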
Zhenchi
a9f21915ef feat(bloom-filter): integrate indexer with mito2 (#5236)
* feat(bloom-filter): integrate indexer with mito2

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* rename skippingindextype

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-12-25 14:30:07 +00:00
Lin Yihai
039989f77b feat: Add vec_mul function. (#5205) 2024-12-25 14:17:22 +00:00
discord9
abf34b845c feat(flow): check sink table mismatch on flow creation (#5112)
* tests: more mismatch errors

* feat: check sink table schema if exists&prompt nice err msg

* chore: rm unused variant

* chore: fmt

* chore: cargo clippy

* feat: check schema on create

* feat: better err msg when mismatch

* tests: fix a schema mismatch

* todo: create sink table

* feat: create sink table

* fix: find time index

* tests: auto created sink table

* fix: remove empty keys

* refactor: per review

* chore: fmt

* test: sqlness

* chore: after rebase
2024-12-25 13:42:37 +00:00
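Checking the sink table at flow creation time means a mismatch surfaces as one readable error instead of failing later at write time. A minimal sketch of such a schema comparison, with an illustrative column type (the real check also covers the time index and keys handled in this commit):

```rust
/// Illustrative column descriptor; real flow/table schemas carry more detail.
#[derive(Debug, PartialEq)]
struct ColumnSketch {
    name: String,
    datatype: String,
}

/// Compare the flow's output schema with an existing sink table schema and
/// return a descriptive error on the first mismatch.
fn check_sink_schema(flow: &[ColumnSketch], sink: &[ColumnSketch]) -> Result<(), String> {
    if flow.len() != sink.len() {
        return Err(format!(
            "sink table has {} columns but the flow produces {}",
            sink.len(),
            flow.len()
        ));
    }
    for (f, s) in flow.iter().zip(sink) {
        if f != s {
            return Err(format!(
                "column mismatch: flow produces `{} {}` but sink table has `{} {}`",
                f.name, f.datatype, s.name, s.datatype
            ));
        }
    }
    Ok(())
}

fn main() {
    let flow = [ColumnSketch { name: "ts".into(), datatype: "timestamp".into() }];
    let sink = [ColumnSketch { name: "ts".into(), datatype: "string".into() }];
    println!("{:?}", check_sink_schema(&flow, &sink));
}
```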
Ruihang Xia
4051be4214 feat: add some critical metrics to flownode (#5235)
* feat: add some critical metrics to flownode

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-12-25 10:57:21 +00:00
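Both `prometheus` and `lazy_static` are already workspace dependencies (see the Cargo.toml diff further down), so flownode counters plausibly follow the usual registered-static pattern. A hedged sketch with invented metric names; the commit's actual metric names are not reproduced here:

```rust
use lazy_static::lazy_static;
use prometheus::{register_int_counter, Encoder, IntCounter, TextEncoder};

lazy_static! {
    /// Hypothetical metric, for illustration only.
    static ref FLOW_PROCESSED_ROWS: IntCounter = register_int_counter!(
        "example_flow_processed_rows_total",
        "Rows processed by a flow worker (illustrative metric)."
    )
    .unwrap();
}

fn main() {
    FLOW_PROCESSED_ROWS.inc_by(42);

    // Render the default registry the same way an HTTP /metrics handler would.
    let mut buf = Vec::new();
    TextEncoder::new().encode(&prometheus::gather(), &mut buf).unwrap();
    println!("{}", String::from_utf8(buf).unwrap());
}
```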
zyy17
5e88c80394 feat: introduce the Limiter in frontend to limit the requests by in-flight write bytes size. (#5231)
feat: introduce Limiter to limit in-flight write bytes size in frontend
2024-12-25 09:11:30 +00:00
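An in-flight write-bytes limiter can be as small as an atomic counter that admits a request only while the total stays under the limit and gives the bytes back when the write finishes. A hedged sketch of that general idea (not the code of #5231):

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

/// Illustrative limiter keyed on in-flight write bytes.
struct InflightLimiterSketch {
    limit: u64,
    in_flight: AtomicU64,
}

/// Holding a permit means the request's bytes are counted as in flight;
/// dropping it releases them.
struct Permit {
    bytes: u64,
    limiter: Arc<InflightLimiterSketch>,
}

impl Drop for Permit {
    fn drop(&mut self) {
        self.limiter.in_flight.fetch_sub(self.bytes, Ordering::Relaxed);
    }
}

impl InflightLimiterSketch {
    fn try_acquire(limiter: &Arc<Self>, bytes: u64) -> Option<Permit> {
        let prev = limiter.in_flight.fetch_add(bytes, Ordering::Relaxed);
        if prev + bytes > limiter.limit {
            // Over the limit: roll back the reservation and reject the request.
            limiter.in_flight.fetch_sub(bytes, Ordering::Relaxed);
            None
        } else {
            Some(Permit { bytes, limiter: Arc::clone(limiter) })
        }
    }
}

fn main() {
    let limiter = Arc::new(InflightLimiterSketch { limit: 100, in_flight: AtomicU64::new(0) });
    let first = InflightLimiterSketch::try_acquire(&limiter, 80);
    assert!(first.is_some());
    assert!(InflightLimiterSketch::try_acquire(&limiter, 40).is_none()); // would exceed 100
    drop(first); // the finished write frees its capacity
    assert!(InflightLimiterSketch::try_acquire(&limiter, 40).is_some());
}
```

The `max_in_flight_write_bytes` options in the config reference diff below are how this limit is exposed to operators.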
discord9
6a46f391cc ci: upload .pdb files too for better windows debug (#5224)
ci: upload .pdb files too
2024-12-25 08:10:57 +00:00
Zhenchi
c96903e60c feat(bloom-filter): impl batch push to creator (#5225)
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-12-25 07:53:53 +00:00
Ruihang Xia
a23f269bb1 fix: correct write cache's metric labels (#5227)
* refactor: remove unused field in WriteCache

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* refactor: unify read and write cache path

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update config and fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove unnecessary methods and adapt test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change the default path

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove remote-home

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-12-25 07:26:21 +00:00
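Since the fix is about which labels the cache counters carry, it helps to recall how a labeled counter keeps the read and write cache paths apart once they share code. A hedged sketch with invented metric and label names:

```rust
use lazy_static::lazy_static;
use prometheus::{register_int_counter_vec, IntCounterVec};

lazy_static! {
    /// Invented metric/label names, for illustration only.
    static ref CACHE_HITS: IntCounterVec = register_int_counter_vec!(
        "example_cache_hit_total",
        "Cache hits, labeled by cache type (illustrative).",
        &["cache_type"]
    )
    .unwrap();
}

fn main() {
    // Recording under the right label value is what the fix is about: a
    // write-cache hit must not be counted under the read cache's label.
    CACHE_HITS.with_label_values(&["read"]).inc();
    CACHE_HITS.with_label_values(&["write"]).inc();
    println!("write hits: {}", CACHE_HITS.with_label_values(&["write"]).get());
}
```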
828 changed files with 40695 additions and 30903 deletions

View File

@@ -41,8 +41,8 @@ runs:
image-name: ${{ inputs.image-name }}
image-tag: ${{ inputs.version }}
docker-file: docker/ci/ubuntu/Dockerfile
amd64-artifact-name: greptime-linux-amd64-pyo3-${{ inputs.version }}
arm64-artifact-name: greptime-linux-arm64-pyo3-${{ inputs.version }}
amd64-artifact-name: greptime-linux-amd64-${{ inputs.version }}
arm64-artifact-name: greptime-linux-arm64-${{ inputs.version }}
platforms: linux/amd64,linux/arm64
push-latest-tag: ${{ inputs.push-latest-tag }}

View File

@@ -48,24 +48,11 @@ runs:
path: /tmp/greptime-*.log
retention-days: 3
- name: Build standard greptime
- name: Build greptime # Builds standard greptime binary
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
features: pyo3_backend,servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Build greptime without pyo3
if: ${{ inputs.dev-mode == 'false' }}
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
features: servers/dashboard
features: servers/dashboard,pg_kvbackend
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
@@ -83,7 +70,7 @@ runs:
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
with:
base-image: centos
features: servers/dashboard
features: servers/dashboard,pg_kvbackend
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}

View File

@@ -33,15 +33,6 @@ runs:
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install PyArrow Package
shell: pwsh
run: pip install pyarrow numpy
- name: Install WSL distribution
uses: Vampire/setup-wsl@v2
with:
@@ -76,5 +67,5 @@ runs:
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime,target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime.pdb
version: ${{ inputs.version }}

View File

@@ -9,8 +9,8 @@ runs:
steps:
# Download artifacts from previous jobs, the artifacts will be downloaded to:
# ${WORKING_DIR}
# |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
# |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
# ...

View File

@@ -30,9 +30,9 @@ runs:
done
# The compressed artifacts will use the following layout:
# greptime-linux-amd64-pyo3-v0.3.0sha256sum
# greptime-linux-amd64-pyo3-v0.3.0.tar.gz
# greptime-linux-amd64-pyo3-v0.3.0
# greptime-linux-amd64-v0.3.0sha256sum
# greptime-linux-amd64-v0.3.0.tar.gz
# greptime-linux-amd64-v0.3.0
# └── greptime
- name: Compress artifacts and calculate checksum
working-directory: ${{ inputs.working-dir }}

View File

@@ -27,11 +27,11 @@ function upload_artifacts() {
# ├── latest-version.txt
# ├── latest-nightly-version.txt
# ├── v0.1.0
# │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
# │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
# │ ├── greptime-darwin-amd64-v0.1.0.sha256sum
# │ └── greptime-darwin-amd64-v0.1.0.tar.gz
# └── v0.2.0
# ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
# └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
# ├── greptime-darwin-amd64-v0.2.0.sha256sum
# └── greptime-darwin-amd64-v0.2.0.tar.gz
find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
aws s3 cp \
"$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"

View File

@@ -1,9 +1,6 @@
name: Check Dependencies
on:
push:
branches:
- main
pull_request:
branches:
- main

View File

@@ -1,4 +1,6 @@
on:
schedule:
- cron: "0 15 * * 1-5"
merge_group:
pull_request:
types: [ opened, synchronize, reopened, ready_for_review ]
@@ -10,17 +12,6 @@ on:
- 'docker/**'
- '.gitignore'
- 'grafana/**'
push:
branches:
- main
paths-ignore:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
- 'grafana/**'
workflow_dispatch:
name: CI
@@ -54,7 +45,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ windows-2022, ubuntu-20.04 ]
os: [ ubuntu-20.04 ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
@@ -68,6 +59,8 @@ jobs:
# Shares across multiple jobs
# Shares with `Clippy` job
shared-key: "check-lint"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Run cargo check
run: cargo check --locked --workspace --all-targets
@@ -78,13 +71,8 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-toml"
- name: Install taplo
run: cargo +stable install taplo-cli --version ^0.9 --locked
run: cargo +stable install taplo-cli --version ^0.9 --locked --force
- name: Run taplo
run: taplo format --check
@@ -105,13 +93,15 @@ jobs:
with:
# Shares across multiple jobs
shared-key: "build-binaries"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin
run: cargo install cargo-gc-bin --force
- name: Build greptime binaries
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc -- --bin greptime --bin sqlness-runner
run: cargo gc -- --bin greptime --bin sqlness-runner --features pg_kvbackend
- name: Pack greptime binaries
shell: bash
run: |
@@ -153,17 +143,12 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
cargo +nightly install cargo-fuzz cargo-gc-bin --force
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
@@ -211,16 +196,11 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz cargo-gc-bin
cargo install cargo-fuzz cargo-gc-bin --force
- name: Download pre-built binary
uses: actions/download-artifact@v4
with:
@@ -266,13 +246,15 @@ jobs:
with:
# Shares across multiple jobs
shared-key: "build-greptime-ci"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin
run: cargo install cargo-gc-bin --force
- name: Build greptime binary
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime
run: cargo gc --profile ci -- --bin greptime --features pg_kvbackend
- name: Pack greptime binary
shell: bash
run: |
@@ -328,17 +310,12 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
cargo +nightly install cargo-fuzz cargo-gc-bin --force
# Downloads ci image
- name: Download pre-built binary
uses: actions/download-artifact@v4
@@ -477,17 +454,12 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
cargo +nightly install cargo-fuzz cargo-gc-bin --force
# Downloads ci image
- name: Download pre-built binary
uses: actions/download-artifact@v4
@@ -584,13 +556,16 @@ jobs:
- name: "Remote WAL"
opts: "-w kafka -k 127.0.0.1:9092"
kafka: true
- name: "Pg Kvbackend"
opts: "--setup-pg"
kafka: false
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- if: matrix.mode.kafka
name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
working-directory: tests-integration/fixtures
run: docker compose up -d --wait kafka
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
@@ -620,11 +595,6 @@ jobs:
- uses: actions-rust-lang/setup-rust-toolchain@v1
with:
components: rustfmt
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-rust-fmt"
- name: Check format
run: make fmt-check
@@ -646,11 +616,70 @@ jobs:
# Shares across multiple jobs
# Shares with `Check` job
shared-key: "check-lint"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Run cargo clippy
run: make clippy
conflict-check:
name: Check for conflict
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Merge Conflict Finder
uses: olivernybroe/action-conflict-finder@v4.0
test:
if: github.event_name != 'merge_group'
runs-on: ubuntu-22.04-arm
timeout-minutes: 60
needs: [conflict-check, clippy, fmt]
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: rui314/setup-mold@v1
- name: Install toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
cache: false
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "coverage-test"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Setup external services
working-directory: tests-integration/fixtures
run: docker compose up -d --wait
- name: Run nextest cases
run: cargo nextest run --workspace -F dashboard -F pg_kvbackend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
RUST_BACKTRACE: 1
RUST_MIN_STACK: 8388608 # 8MB
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
GT_MINIO_BUCKET: greptime
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
UNITTEST_LOG_DIR: "__unittest_logs"
coverage:
if: github.event.pull_request.draft == false
if: github.event_name == 'merge_group'
runs-on: ubuntu-20.04-8-cores
timeout-minutes: 60
steps:
@@ -658,48 +687,29 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: KyleMayes/install-llvm-action@v1
with:
version: "14.0"
- uses: rui314/setup-mold@v1
- name: Install toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
components: llvm-tools-preview
components: llvm-tools
cache: false
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "coverage-test"
- name: Docker Cache
uses: ScribeMD/docker-cache@0.3.7
with:
key: docker-${{ runner.os }}-coverage
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow numpy
- name: Setup etcd server
working-directory: tests-integration/fixtures/etcd
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup minio
working-directory: tests-integration/fixtures/minio
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup postgres server
working-directory: tests-integration/fixtures/postgres
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup external services
working-directory: tests-integration/fixtures
run: docker compose up -d --wait
- name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}

View File

@@ -66,6 +66,11 @@ jobs:
steps:
- run: 'echo "No action required"'
test:
runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'
sqlness:
name: Sqlness Test (${{ matrix.mode.name }})
runs-on: ${{ matrix.os }}

View File

@@ -1,6 +1,6 @@
on:
schedule:
- cron: "0 23 * * 1-5"
- cron: "0 23 * * 1-4"
workflow_dispatch:
name: Nightly CI
@@ -91,18 +91,12 @@ jobs:
uses: Swatinem/rust-cache@v2
- name: Install Cargo Nextest
uses: taiki-e/install-action@nextest
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install PyArrow Package
run: pip install pyarrow numpy
- name: Install WSL distribution
uses: Vampire/setup-wsl@v2
with:
distribution: Ubuntu-22.04
- name: Running tests
run: cargo nextest run -F pyo3_backend,dashboard
run: cargo nextest run -F dashboard
env:
CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
RUST_BACKTRACE: 1
@@ -115,14 +109,15 @@ jobs:
UNITTEST_LOG_DIR: "__unittest_logs"
cleanbuild-linux-nix:
runs-on: ubuntu-latest-8-cores
name: Run clean build on Linux
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v27
with:
nix_path: nixpkgs=channel:nixos-unstable
- run: nix-shell --pure --run "cargo build"
nix_path: nixpkgs=channel:nixos-24.11
- run: nix develop --command cargo build
check-status:
name: Check status

View File

@@ -222,18 +222,10 @@ jobs:
arch: aarch64-apple-darwin
features: servers/dashboard
artifacts-dir-prefix: greptime-darwin-arm64
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
arch: aarch64-apple-darwin
features: pyo3_backend,servers/dashboard
artifacts-dir-prefix: greptime-darwin-arm64-pyo3
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
features: servers/dashboard
arch: x86_64-apple-darwin
artifacts-dir-prefix: greptime-darwin-amd64
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
features: pyo3_backend,servers/dashboard
arch: x86_64-apple-darwin
artifacts-dir-prefix: greptime-darwin-amd64-pyo3
runs-on: ${{ matrix.os }}
outputs:
build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }}
@@ -271,10 +263,6 @@ jobs:
arch: x86_64-pc-windows-msvc
features: servers/dashboard
artifacts-dir-prefix: greptime-windows-amd64
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
arch: x86_64-pc-windows-msvc
features: pyo3_backend,servers/dashboard
artifacts-dir-prefix: greptime-windows-amd64-pyo3
runs-on: ${{ matrix.os }}
outputs:
build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }}
@@ -448,6 +436,22 @@ jobs:
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
bump-doc-version:
name: Bump doc version
if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [allocate-runners]
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Bump doc version
working-directory: cyborg
run: pnpm tsx bin/bump-doc-version.ts
env:
VERSION: ${{ needs.allocate-runners.outputs.version }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
name: Send notification to Greptime team

Cargo.lock (generated), 3418 changes: file diff suppressed because it is too large.

View File

@@ -55,7 +55,6 @@ members = [
"src/promql",
"src/puffin",
"src/query",
"src/script",
"src/servers",
"src/session",
"src/sql",
@@ -79,8 +78,6 @@ clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn"
clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
# Remove this after https://github.com/PyO3/pyo3/issues/4094
rust.non_local_definitions = "allow"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
[workspace.dependencies]
@@ -91,14 +88,18 @@ rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
# See for more detaiils: https://github.com/rust-lang/cargo/issues/11329
ahash = { version = "0.8", features = ["compile-time-rng"] }
aquamarine = "0.3"
arrow = { version = "51.0.0", features = ["prettyprint"] }
arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] }
arrow-flight = "51.0"
arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4", "zstd"] }
arrow-schema = { version = "51.0", features = ["serde"] }
arrow = { version = "53.0.0", features = ["prettyprint"] }
arrow-array = { version = "53.0.0", default-features = false, features = ["chrono-tz"] }
arrow-flight = "53.0"
arrow-ipc = { version = "53.0.0", default-features = false, features = ["lz4", "zstd"] }
arrow-schema = { version = "53.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
axum = { version = "0.6", features = ["headers"] }
# Remember to update axum-extra, axum-macros when updating axum
axum = "0.8"
axum-extra = "0.10"
axum-macros = "0.4"
backon = "1"
base64 = "0.21"
bigdecimal = "0.4.2"
bitflags = "2.4.1"
@@ -109,35 +110,43 @@ clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "5.4"
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
deadpool = "0.10"
deadpool-postgres = "0.12"
derive_builder = "0.12"
dotenv = "0.15"
etcd-client = "0.13"
etcd-client = "0.14"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a875e976441188028353f7274a46a7e6e065c5d4" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "683e9d10ae7f3dfb8aaabd89082fc600c17e3795" }
hex = "0.4"
http = "1"
humantime = "2.1"
humantime-serde = "1.1"
hyper = "1.1"
hyper-util = "0.1"
itertools = "0.10"
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
lazy_static = "1.4"
local-ip-address = "0.6"
loki-api = { git = "https://github.com/shuiyisong/tracing-loki", branch = "chore/prost_version" }
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
mockall = "0.11.4"
moka = "0.12"
nalgebra = "0.33"
notify = "6.1"
num_cpus = "1.16"
once_cell = "1.18"
opentelemetry-proto = { version = "0.5", features = [
opentelemetry-proto = { version = "0.27", features = [
"gen-tonic",
"metrics",
"trace",
@@ -145,12 +154,12 @@ opentelemetry-proto = { version = "0.5", features = [
"logs",
] }
parking_lot = "0.12"
parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
parquet = { version = "53.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.4.3", features = ["ser"] }
prost = "0.12"
prost = "0.13"
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8"
ratelimit = "0.9"
@@ -169,28 +178,30 @@ rstest = "0.21"
rstest_reuse = "0.7"
rust_decimal = "1.33"
rustc-hash = "2.0"
rustls = { version = "0.23.20", default-features = false } # override by patch, see [patch.crates-io]
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3"
shadow-rs = "0.35"
shadow-rs = "0.38"
similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sysinfo = "0.30"
# on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
# on branch v0.52.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "71dd86058d2af97b9925093d40c4e03360403170", features = [
"visitor",
"serde",
] }
] } # on branch v0.44.x
strum = { version = "0.25", features = ["derive"] }
tempfile = "3"
tokio = { version = "1.40", features = ["full"] }
tokio-postgres = "0.7"
tokio-rustls = { version = "0.26.0", default-features = false } # override by patch, see [patch.crates-io]
tokio-stream = "0.1"
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
tower = "0.4"
tonic = { version = "0.12", features = ["tls", "gzip", "zstd"] }
tower = "0.5"
tracing-appender = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
typetag = "0.2"
@@ -252,7 +263,6 @@ plugins = { path = "src/plugins" }
promql = { path = "src/promql" }
puffin = { path = "src/puffin" }
query = { path = "src/query" }
script = { path = "src/script" }
servers = { path = "src/servers" }
session = { path = "src/session" }
sql = { path = "src/sql" }
@@ -262,9 +272,9 @@ table = { path = "src/table" }
[patch.crates-io]
# change all rustls dependencies to use our fork to default to `ring` to make it "just work"
hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls" }
rustls = { git = "https://github.com/GreptimeTeam/rustls" }
tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls" }
hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls", rev = "a951e03" } # version = "0.27.5" with ring patch
rustls = { git = "https://github.com/GreptimeTeam/rustls", rev = "34fd0c6" } # version = "0.23.20" with ring patch
tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls", rev = "4604ca6" } # version = "0.26.0" with ring patch
# This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1
# see https://github.com/aws/aws-lc-rs/pull/526
# aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }

View File

@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2024-10-19-a5c00e85-20241024184445
DEV_BUILDER_IMAGE_TAG ?= 2024-12-25-9d0fa5d5-20250124085746
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu
@@ -165,15 +165,14 @@ nextest: ## Install nextest tools.
sqlness-test: ## Run sqlness test.
cargo sqlness ${SQLNESS_OPTS}
# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz
fuzz:
fuzz: ## Run fuzz test ${FUZZ_TARGET}.
cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}
.PHONY: fuzz-ls
fuzz-ls:
fuzz-ls: ## List all fuzz targets.
cargo fuzz list --fuzz-dir tests-fuzz
.PHONY: check

View File

@@ -138,7 +138,8 @@ Check the prerequisite:
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
* Python toolchain (optional): Required only if built with PyO3 backend. More details for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
* C/C++ building essentials, including `gcc`/`g++`/`autoconf` and the glibc library (e.g. `libc6-dev` on Ubuntu and `glibc-devel` on Fedora)
* Python toolchain (optional): Required only if using some test scripts.
Build GreptimeDB binary:
@@ -228,4 +229,3 @@ Special thanks to all the contributors who have propelled GreptimeDB forward. Fo
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.

View File

@@ -18,6 +18,7 @@
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
@@ -25,6 +26,8 @@
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
@@ -90,10 +93,12 @@
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
| `flow` | -- | -- | flow engine options. |
| `flow.num_workers` | Integer | `0` | The number of flow workers in the flownode.<br/>Not setting this value (or setting it to 0) will use the number of CPU cores divided by 2. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
@@ -131,10 +136,10 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/object_cache/write`. |
| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
@@ -142,26 +147,33 @@
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter in Mito engine. |
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the bloom filter on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the bloom filter on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the bloom filter on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for bloom filter creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `region_engine.metric` | -- | -- | Metric engine options. |
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -195,6 +207,7 @@
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `default_timezone` | String | Unset | The default timezone of the server. |
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
@@ -205,9 +218,11 @@
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.hostname` | String | `127.0.0.1:4001` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
@@ -286,9 +301,11 @@
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
| `store_addrs` | Array | -- | Store server address default to etcd store. |
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `backend` | String | `EtcdStore` | The datastore for meta server. |
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store` |
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effect when using a RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend<br/>Only used when backend is `postgres_store`. |
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
@@ -316,7 +333,7 @@
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled. |
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
@@ -371,7 +388,7 @@
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.hostname` | String | `127.0.0.1:3001` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
@@ -421,7 +438,7 @@
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
@@ -459,10 +476,10 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/object_cache/write`. |
| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
@@ -470,26 +487,33 @@
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter index in Mito engine. |
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for the index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `region_engine.metric` | -- | -- | Metric engine options. |
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
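As a quick orientation, the index and memtable options documented in the table above map onto the datanode's TOML configuration roughly as sketched below. This is an illustrative example built only from the defaults listed in the table, not an exhaustive or authoritative config.

```toml
# Illustrative sketch only: keys and defaults are taken from the table above.
[region_engine.mito.index]
aux_path = ""                      # defaults to `{data_home}/index_intermediate`
staging_size = "2GB"
metadata_cache_size = "64MiB"
content_cache_size = "128MiB"
content_cache_page_size = "64KiB"

[region_engine.mito.bloom_filter_index]
create_on_flush = "auto"           # `auto` or `disable`
create_on_compaction = "auto"
apply_on_query = "auto"
mem_threshold_on_create = "auto"   # `auto`, `unlimited`, or a size such as "64MB"

[region_engine.mito.memtable]
type = "time_series"               # or `partition_tree` (experimental)
```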
@@ -522,12 +546,18 @@
| --- | -----| ------- | ----------- |
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
| `node_id` | Integer | Unset | The flownode identifier, which should be unique in the cluster. |
| `flow` | -- | -- | The flow engine options. |
| `flow.num_workers` | Integer | `0` | The number of flow workers in the flownode.<br/>Not setting (or setting to 0) this value will use the number of CPU cores divided by 2. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host. |
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
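Putting the rows above together, a minimal flownode configuration might look like the sketch below. Values mirror the documented defaults; the metasrv address is only an example placeholder.

```toml
# Illustrative sketch only: defaults are those documented in the table above.
mode = "distributed"
node_id = 14                        # must be unique in the cluster

[flow]
num_workers = 0                     # 0 means half the number of CPU cores

[grpc]
addr = "127.0.0.1:6800"
hostname = "127.0.0.1"
runtime_size = 2
max_recv_message_size = "512MB"
max_send_message_size = "512MB"

[http]
addr = "127.0.0.1:4000"
timeout = "30s"
body_limit = "64MB"

[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]  # example address
timeout = "3s"
```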

View File

@@ -59,7 +59,7 @@ body_limit = "64MB"
addr = "127.0.0.1:3001"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1"
hostname = "127.0.0.1:3001"
## The number of server worker threads.
runtime_size = 8
## The maximum receive message size for gRPC server.
@@ -294,7 +294,7 @@ data_home = "/tmp/greptimedb/"
type = "File"
## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
## A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling.
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
## @toml2docs:none-default
#+ cache_path = ""
@@ -475,18 +475,18 @@ auto_flush_interval = "1h"
## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB"
## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
enable_experimental_write_cache = false
## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
enable_write_cache = false
## File system path for write cache, defaults to `{data_home}/object_cache/write`.
experimental_write_cache_path = ""
## File system path for write cache, defaults to `{data_home}`.
write_cache_path = ""
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
experimental_write_cache_size = "5GiB"
write_cache_size = "5GiB"
## TTL for write cache.
## @toml2docs:none-default
experimental_write_cache_ttl = "8h"
write_cache_ttl = "8h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
@@ -516,6 +516,15 @@ aux_path = ""
## The max capacity of the staging directory.
staging_size = "2GB"
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "64KiB"
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
@@ -543,15 +552,6 @@ mem_threshold_on_create = "auto"
## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = ""
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "8MiB"
## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index]
@@ -576,6 +576,30 @@ apply_on_query = "auto"
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
## The options for bloom filter index in Mito engine.
[region_engine.mito.bloom_filter_index]
## Whether to create the index on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the index on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the index on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for the index creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
[region_engine.mito.memtable]
## Memtable type.
## - `time_series`: time-series memtable
@@ -598,6 +622,12 @@ fork_dictionary_bytes = "1GiB"
## Enable the file engine.
[region_engine.file]
[[region_engine]]
## Metric engine options.
[region_engine.metric]
## Whether to enable the experimental sparse primary key encoding.
experimental_sparse_primary_key_encoding = false
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.

View File

@@ -5,6 +5,12 @@ mode = "distributed"
## @toml2docs:none-default
node_id = 14
## flow engine options.
[flow]
## The number of flow workers in the flownode.
## Not setting (or setting to 0) this value will use the number of CPU cores divided by 2.
#+num_workers=0
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
@@ -19,6 +25,16 @@ max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
max_send_message_size = "512MB"
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The metasrv client options.
[meta_client]

View File

@@ -2,6 +2,10 @@
## @toml2docs:none-default
default_timezone = "UTC"
## The maximum in-flight write bytes.
## @toml2docs:none-default
#+ max_in_flight_write_bytes = "500MB"
## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.
@@ -27,6 +31,12 @@ timeout = "30s"
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## HTTP CORS support; it's turned on by default.
## This allows browsers to access HTTP APIs without CORS restrictions.
enable_cors = true
## Customize allowed origins for HTTP CORS.
## @toml2docs:none-default
cors_allowed_origins = ["https://example.com"]
## The gRPC server options.
[grpc]
@@ -34,7 +44,7 @@ body_limit = "64MB"
addr = "127.0.0.1:4001"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1"
hostname = "127.0.0.1:4001"
## The number of server worker threads.
runtime_size = 8

View File

@@ -8,13 +8,29 @@ bind_addr = "127.0.0.1:3002"
server_addr = "127.0.0.1:3002"
## Store server address, defaulting to the etcd store.
## For postgres store, the format is:
## "password=password dbname=postgres user=postgres host=localhost port=5432"
## For etcd store, the format is:
## "127.0.0.1:2379"
store_addrs = ["127.0.0.1:2379"]
## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
## The datastore for meta server.
backend = "EtcdStore"
## Available values:
## - `etcd_store` (default value)
## - `memory_store`
## - `postgres_store`
backend = "etcd_store"
## Table name in RDS to store metadata. Effective when using an RDS kv backend.
## **Only used when backend is `postgres_store`.**
meta_table_name = "greptime_metakv"
## Advisory lock id in PostgreSQL for election. Effective when using PostgreSQL as the kv backend.
## Only used when backend is `postgres_store`.
meta_election_lock_id = 1
## Datanode selector type.
## - `round_robin` (default value)
@@ -113,6 +129,8 @@ num_topics = 64
selector_type = "round_robin"
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
## Only accepts strings that match the following regular expression pattern:
## [a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
topic_name_prefix = "greptimedb_wal_topic"
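For reference, the backend options shown above combine into a PostgreSQL-backed metasrv store along the lines sketched below. The keys are those from the metasrv config in this diff; the connection string is a placeholder, not a working DSN.

```toml
# Illustrative sketch only: keys are those shown in the metasrv config above;
# the connection string is a placeholder.
bind_addr = "127.0.0.1:3002"
server_addr = "127.0.0.1:3002"

# For a postgres store, `store_addrs` carries a connection string instead of etcd endpoints.
store_addrs = ["password=password dbname=postgres user=postgres host=localhost port=5432"]
backend = "postgres_store"

# Only effective when backend is `postgres_store`.
meta_table_name = "greptime_metakv"
meta_election_lock_id = 1
```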

View File

@@ -18,6 +18,10 @@ max_concurrent_queries = 0
## Enable telemetry to collect anonymous usage data. Enabled by default.
#+ enable_telemetry = true
## The maximum in-flight write bytes.
## @toml2docs:none-default
#+ max_in_flight_write_bytes = "500MB"
## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.
@@ -35,6 +39,12 @@ timeout = "30s"
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## HTTP CORS support; it's turned on by default.
## This allows browsers to access HTTP APIs without CORS restrictions.
enable_cors = true
## Customize allowed origins for HTTP CORS.
## @toml2docs:none-default
cors_allowed_origins = ["https://example.com"]
## The gRPC server options.
[grpc]
@@ -280,6 +290,12 @@ max_retry_times = 3
## Initial retry delay of procedures, increases exponentially
retry_delay = "500ms"
## flow engine options.
[flow]
## The number of flow workers in the flownode.
## Not setting (or setting to 0) this value will use the number of CPU cores divided by 2.
#+num_workers=0
# Example of using S3 as the storage.
# [storage]
# type = "S3"
@@ -333,7 +349,7 @@ data_home = "/tmp/greptimedb/"
type = "File"
## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
## A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling.
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
## @toml2docs:none-default
#+ cache_path = ""
@@ -514,18 +530,18 @@ auto_flush_interval = "1h"
## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB"
## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
enable_experimental_write_cache = false
## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
enable_write_cache = false
## File system path for write cache, defaults to `{data_home}/object_cache/write`.
experimental_write_cache_path = ""
## File system path for write cache, defaults to `{data_home}`.
write_cache_path = ""
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
experimental_write_cache_size = "5GiB"
write_cache_size = "5GiB"
## TTL for write cache.
## @toml2docs:none-default
experimental_write_cache_ttl = "8h"
write_cache_ttl = "8h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
@@ -555,6 +571,15 @@ aux_path = ""
## The max capacity of the staging directory.
staging_size = "2GB"
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "64KiB"
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
@@ -582,15 +607,6 @@ mem_threshold_on_create = "auto"
## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = ""
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "8MiB"
## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index]
@@ -615,6 +631,30 @@ apply_on_query = "auto"
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
## The options for bloom filter in Mito engine.
[region_engine.mito.bloom_filter_index]
## Whether to create the bloom filter on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the bloom filter on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the bloom filter on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for bloom filter creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
[region_engine.mito.memtable]
## Memtable type.
## - `time_series`: time-series memtable
@@ -637,6 +677,12 @@ fork_dictionary_bytes = "1GiB"
## Enable the file engine.
[region_engine.file]
[[region_engine]]
## Metric engine options.
[region_engine.metric]
## Whether to enable the experimental sparse primary key encoding.
experimental_sparse_primary_key_encoding = false
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.

View File

@@ -0,0 +1,75 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from "@actions/core";
import {obtainClient} from "@/common";
async function triggerWorkflow(workflowId: string, version: string) {
const docsClient = obtainClient("DOCS_REPO_TOKEN")
try {
await docsClient.rest.actions.createWorkflowDispatch({
owner: "GreptimeTeam",
repo: "docs",
workflow_id: workflowId,
ref: "main",
inputs: {
version,
},
});
console.log(`Successfully triggered ${workflowId} workflow with version ${version}`);
} catch (error) {
core.setFailed(`Failed to trigger workflow: ${error.message}`);
}
}
function determineWorkflow(version: string): [string, string] {
// Check if it's a nightly version
if (version.includes('nightly')) {
return ['bump-nightly-version.yml', version];
}
const parts = version.split('.');
if (parts.length !== 3) {
throw new Error('Invalid version format');
}
// If patch version (last number) is 0, it's a major version
// Return only major.minor version
if (parts[2] === '0') {
return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
}
// Otherwise it's a patch version, use full version
return ['bump-patch-version.yml', version];
}
const version = process.env.VERSION;
if (!version) {
core.setFailed("VERSION environment variable is required");
process.exit(1);
}
// Remove 'v' prefix if exists
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;
try {
const [workflowId, apiVersion] = determineWorkflow(cleanVersion);
triggerWorkflow(workflowId, apiVersion);
} catch (error) {
core.setFailed(`Error processing version: ${error.message}`);
process.exit(1);
}

View File

@@ -13,8 +13,6 @@ RUN yum install -y epel-release \
openssl \
openssl-devel \
centos-release-scl \
rh-python38 \
rh-python38-python-devel \
which
# Install protoc
@@ -24,7 +22,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
# Build the project in release mode.
RUN --mount=target=.,rw \
@@ -43,8 +41,6 @@ RUN yum install -y epel-release \
openssl \
openssl-devel \
centos-release-scl \
rh-python38 \
rh-python38-python-devel \
which
WORKDIR /greptime

View File

@@ -7,10 +7,8 @@ ARG OUTPUT_DIR
ENV LANG en_US.utf8
WORKDIR /greptimedb
# Add PPA for Python 3.10.
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa -y
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
# Install dependencies.
RUN --mount=type=cache,target=/var/cache/apt \
@@ -20,10 +18,7 @@ RUN --mount=type=cache,target=/var/cache/apt \
curl \
git \
build-essential \
pkg-config \
python3.10 \
python3.10-dev \
python3-pip
pkg-config
# Install Rust.
SHELL ["/bin/bash", "-c"]
@@ -46,15 +41,8 @@ ARG OUTPUT_DIR
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
-y install ca-certificates \
python3.10 \
python3.10-dev \
python3-pip \
curl
COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt
WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

View File

@@ -7,9 +7,7 @@ RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
RUN yum install -y epel-release \
openssl \
openssl-devel \
centos-release-scl \
rh-python38 \
rh-python38-python-devel
centos-release-scl
ARG TARGETARCH

View File

@@ -8,15 +8,8 @@ ARG TARGET_BIN=greptime
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
python3.10 \
python3.10-dev \
python3-pip \
curl
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt
ARG TARGETARCH
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/

View File

@@ -9,16 +9,20 @@ RUN cp ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/
# Install dependencies.
RUN apt-get update && apt-get install -y \
libssl-dev \
protobuf-compiler \
curl \
git \
unzip \
build-essential \
pkg-config \
python3 \
python3-dev \
python3-pip \
&& pip3 install --upgrade pip \
&& pip3 install pyarrow
pkg-config
# Install protoc
ARG PROTOBUF_VERSION=29.3
RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/
# Trust workdir
RUN git config --global --add safe.directory /greptimedb

View File

@@ -12,18 +12,21 @@ RUN yum install -y epel-release \
openssl \
openssl-devel \
centos-release-scl \
rh-python38 \
rh-python38-python-devel \
which
# Install protoc
RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
ARG PROTOBUF_VERSION=29.3
RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/
# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
# Install Rust toolchains.
ARG RUST_TOOLCHAIN

View File

@@ -6,11 +6,8 @@ ARG DOCKER_BUILD_ROOT=.
ENV LANG en_US.utf8
WORKDIR /greptimedb
# Add PPA for Python 3.10.
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa -y
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
# Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
libssl-dev \
@@ -20,39 +17,24 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
git \
build-essential \
pkg-config \
python3.10 \
python3.10-dev
pkg-config
ARG TARGETPLATFORM
RUN echo "target platform: $TARGETPLATFORM"
ARG PROTOBUF_VERSION=29.3
# Install protobuf, because the one in the apt is too old (v3.12).
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v29.1/protoc-29.1-linux-aarch_64.zip && \
unzip protoc-29.1-linux-aarch_64.zip -d protoc3; \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip && \
unzip protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip -d protoc3; \
elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v29.1/protoc-29.1-linux-x86_64.zip && \
unzip protoc-29.1-linux-x86_64.zip -d protoc3; \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3; \
fi
RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/
# https://github.com/GreptimeTeam/greptimedb/actions/runs/10935485852/job/30357457188#step:3:7106
# `aws-lc-sys` require gcc >= 10.3.0 to work, hence alias to use gcc-10
RUN apt-get remove -y gcc-9 g++-9 cpp-9 && \
apt-get install -y gcc-10 g++-10 cpp-10 make cmake && \
ln -sf /usr/bin/gcc-10 /usr/bin/gcc && ln -sf /usr/bin/g++-10 /usr/bin/g++ && \
ln -sf /usr/bin/gcc-10 /usr/bin/cc && \
ln -sf /usr/bin/g++-10 /usr/bin/cpp && ln -sf /usr/bin/g++-10 /usr/bin/c++ && \
cc --version && gcc --version && g++ --version && cpp --version && c++ --version
# Remove Python 3.8 and install pip.
RUN apt-get -y purge python3.8 && \
apt-get -y autoremove && \
ln -s /usr/bin/python3.10 /usr/bin/python3 && \
curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
# image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves),
@@ -65,10 +47,6 @@ RUN apt-get -y purge python3.8 && \
# it can be a different user that have prepared the submodules.
RUN git config --global --add safe.directory '*'
# Install Python dependencies.
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y

View File

@@ -21,7 +21,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
pkg-config
# Install protoc.
ENV PROTOC_VERSION=25.1
ENV PROTOC_VERSION=29.3
RUN if [ "$(uname -m)" = "x86_64" ]; then \
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-x86_64.zip; \
elif [ "$(uname -m)" = "aarch64" ]; then \

View File

@@ -39,14 +39,16 @@ services:
container_name: metasrv
ports:
- 3002:3002
- 3000:3000
command:
- metasrv
- start
- --bind-addr=0.0.0.0:3002
- --server-addr=metasrv:3002
- --store-addrs=etcd0:2379
- --http-addr=0.0.0.0:3000
healthcheck:
test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
test: [ "CMD", "curl", "-f", "http://metasrv:3000/health" ]
interval: 5s
timeout: 3s
retries: 5
@@ -73,10 +75,10 @@ services:
volumes:
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
healthcheck:
test: [ "CMD", "curl", "-f", "http://datanode0:5000/health" ]
test: [ "CMD", "curl", "-fv", "http://datanode0:5000/health" ]
interval: 5s
timeout: 3s
retries: 5
retries: 10
depends_on:
metasrv:
condition: service_healthy
@@ -115,6 +117,7 @@ services:
container_name: flownode0
ports:
- 4004:4004
- 4005:4005
command:
- flownode
- start
@@ -122,9 +125,15 @@ services:
- --metasrv-addrs=metasrv:3002
- --rpc-addr=0.0.0.0:4004
- --rpc-hostname=flownode0:4004
- --http-addr=0.0.0.0:4005
depends_on:
frontend0:
condition: service_healthy
healthcheck:
test: [ "CMD", "curl", "-f", "http://flownode0:4005/health" ]
interval: 5s
timeout: 3s
retries: 5
networks:
- greptimedb

View File

@@ -1,5 +0,0 @@
numpy>=1.24.2
pandas>=1.5.3
pyarrow>=11.0.0
requests>=2.28.2
scipy>=1.10.1

flake.lock (new generated file)
View File

@@ -0,0 +1,100 @@
{
"nodes": {
"fenix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
],
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1737613896,
"narHash": "sha256-ldqXIglq74C7yKMFUzrS9xMT/EVs26vZpOD68Sh7OcU=",
"owner": "nix-community",
"repo": "fenix",
"rev": "303a062fdd8e89f233db05868468975d17855d80",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1737569578,
"narHash": "sha256-6qY0pk2QmUtBT9Mywdvif0i/CLVgpCjMUn6g9vB+f3M=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "47addd76727f42d351590c905d9d1905ca895b82",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.11",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"fenix": "fenix",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1737581772,
"narHash": "sha256-t1P2Pe3FAX9TlJsCZbmJ3wn+C4qr6aSMypAOu8WNsN0=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "582af7ee9c8d84f5d534272fc7de9f292bd849be",
"type": "github"
},
"original": {
"owner": "rust-lang",
"ref": "nightly",
"repo": "rust-analyzer",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

flake.nix (new file)
View File

@@ -0,0 +1,56 @@
{
description = "Development environment flake";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
};
flake-utils.url = "github:numtide/flake-utils";
};
outputs = { self, nixpkgs, fenix, flake-utils }:
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = nixpkgs.legacyPackages.${system};
buildInputs = with pkgs; [
libgit2
libz
];
lib = nixpkgs.lib;
rustToolchain = fenix.packages.${system}.fromToolchainName {
name = (lib.importTOML ./rust-toolchain.toml).toolchain.channel;
sha256 = "sha256-f/CVA1EC61EWbh0SjaRNhLL0Ypx2ObupbzigZp8NmL4=";
};
in
{
devShells.default = pkgs.mkShell {
nativeBuildInputs = with pkgs; [
pkg-config
git
clang
gcc
protobuf
gnumake
mold
(rustToolchain.withComponents [
"cargo"
"clippy"
"rust-src"
"rustc"
"rustfmt"
"rust-analyzer"
"llvm-tools"
])
cargo-nextest
cargo-llvm-cov
taplo
curl
gnuplot ## for cargo bench
];
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
};
});
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,3 +1,2 @@
[toolchain]
channel = "nightly-2024-10-19"
components = ["rust-analyzer"]
channel = "nightly-2024-12-25"

View File

@@ -14,6 +14,7 @@
import os
import re
from multiprocessing import Pool
def find_rust_files(directory):
@@ -33,13 +34,11 @@ def extract_branch_names(file_content):
return pattern.findall(file_content)
def check_snafu_in_files(branch_name, rust_files):
def check_snafu_in_files(branch_name, rust_files_content):
branch_name_snafu = f"{branch_name}Snafu"
for rust_file in rust_files:
with open(rust_file, "r") as file:
content = file.read()
if branch_name_snafu in content:
return True
for content in rust_files_content.values():
if branch_name_snafu in content:
return True
return False
@@ -49,21 +48,24 @@ def main():
for error_file in error_files:
with open(error_file, "r") as file:
content = file.read()
branch_names.extend(extract_branch_names(content))
branch_names.extend(extract_branch_names(file.read()))
unused_snafu = [
branch_name
for branch_name in branch_names
if not check_snafu_in_files(branch_name, other_rust_files)
]
# Read all rust files into memory once
rust_files_content = {}
for rust_file in other_rust_files:
with open(rust_file, "r") as file:
rust_files_content[rust_file] = file.read()
with Pool() as pool:
results = pool.starmap(
check_snafu_in_files, [(bn, rust_files_content) for bn in branch_names]
)
unused_snafu = [bn for bn, found in zip(branch_names, results) if not found]
if unused_snafu:
print("Unused error variants:")
for name in unused_snafu:
print(name)
if unused_snafu:
raise SystemExit(1)

View File

@@ -1,27 +0,0 @@
let
nixpkgs = fetchTarball "https://github.com/NixOS/nixpkgs/tarball/nixos-unstable";
fenix = import (fetchTarball "https://github.com/nix-community/fenix/archive/main.tar.gz") {};
pkgs = import nixpkgs { config = {}; overlays = []; };
in
pkgs.mkShell rec {
nativeBuildInputs = with pkgs; [
pkg-config
git
clang
gcc
protobuf
mold
(fenix.fromToolchainFile {
dir = ./.;
})
cargo-nextest
taplo
];
buildInputs = with pkgs; [
libgit2
];
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
}

View File

@@ -33,7 +33,7 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: prost::DecodeError,
error: prost::UnknownEnumValue,
},
#[snafu(display("Failed to create column datatype from {:?}", from))]

View File

@@ -86,7 +86,7 @@ impl ColumnDataTypeWrapper {
/// Get a tuple of ColumnDataType and ColumnDataTypeExtension.
pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
(self.datatype, self.datatype_ext.clone())
(self.datatype, self.datatype_ext)
}
}
@@ -685,14 +685,18 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
IntervalType::YearMonth(_) => Arc::new(IntervalYearMonthVector::from_vec(
values.interval_year_month_values,
)),
IntervalType::DayTime(_) => Arc::new(IntervalDayTimeVector::from_vec(
values.interval_day_time_values,
IntervalType::DayTime(_) => Arc::new(IntervalDayTimeVector::from_iter_values(
values
.interval_day_time_values
.iter()
.map(|x| IntervalDayTime::from_i64(*x).into()),
)),
IntervalType::MonthDayNano(_) => {
Arc::new(IntervalMonthDayNanoVector::from_iter_values(
values.interval_month_day_nano_values.iter().map(|x| {
IntervalMonthDayNano::new(x.months, x.days, x.nanoseconds).to_i128()
}),
values
.interval_month_day_nano_values
.iter()
.map(|x| IntervalMonthDayNano::new(x.months, x.days, x.nanoseconds).into()),
))
}
},
@@ -1495,14 +1499,22 @@ mod tests {
column.values.as_ref().unwrap().interval_year_month_values
);
let vector = Arc::new(IntervalDayTimeVector::from_vec(vec![4, 5, 6]));
let vector = Arc::new(IntervalDayTimeVector::from_vec(vec![
IntervalDayTime::new(0, 4).into(),
IntervalDayTime::new(0, 5).into(),
IntervalDayTime::new(0, 6).into(),
]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![4, 5, 6],
column.values.as_ref().unwrap().interval_day_time_values
);
let vector = Arc::new(IntervalMonthDayNanoVector::from_vec(vec![7, 8, 9]));
let vector = Arc::new(IntervalMonthDayNanoVector::from_vec(vec![
IntervalMonthDayNano::new(0, 0, 7).into(),
IntervalMonthDayNano::new(0, 0, 8).into(),
IntervalMonthDayNano::new(0, 0, 9).into(),
]));
let len = vector.len();
push_vals(&mut column, 3, vector);
(0..len).for_each(|i| {

View File

@@ -34,10 +34,8 @@ const SKIPPING_INDEX_GRPC_KEY: &str = "skipping_index";
/// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
let data_type = ColumnDataTypeWrapper::try_new(
column_def.data_type,
column_def.datatype_extension.clone(),
)?;
let data_type =
ColumnDataTypeWrapper::try_new(column_def.data_type, column_def.datatype_extension)?;
let constraint = if column_def.default_constraint.is_empty() {
None
@@ -57,13 +55,13 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
}
if let Some(options) = column_def.options.as_ref() {
if let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY) {
metadata.insert(FULLTEXT_KEY.to_string(), fulltext.clone());
metadata.insert(FULLTEXT_KEY.to_string(), fulltext.to_owned());
}
if let Some(inverted_index) = options.options.get(INVERTED_INDEX_GRPC_KEY) {
metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.clone());
metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.to_owned());
}
if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.clone());
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.to_owned());
}
}
@@ -82,7 +80,7 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
if let Some(fulltext) = column_schema.metadata().get(FULLTEXT_KEY) {
options
.options
.insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.clone());
.insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.to_owned());
}
if let Some(inverted_index) = column_schema.metadata().get(INVERTED_INDEX_KEY) {
options
@@ -102,7 +100,7 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
pub fn contains_fulltext(options: &Option<ColumnOptions>) -> bool {
options
.as_ref()
.map_or(false, |o| o.options.contains_key(FULLTEXT_GRPC_KEY))
.is_some_and(|o| o.options.contains_key(FULLTEXT_GRPC_KEY))
}
/// Tries to construct a `ColumnOptions` from the given `FulltextOptions`.
@@ -181,14 +179,14 @@ mod tests {
let options = options_from_column_schema(&schema);
assert!(options.is_none());
let schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
let mut schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
.with_fulltext_options(FulltextOptions {
enable: true,
analyzer: FulltextAnalyzer::English,
case_sensitive: false,
})
.unwrap()
.set_inverted_index(true);
.unwrap();
schema.set_inverted_index(true);
let options = options_from_column_schema(&schema).unwrap();
assert_eq!(
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),

View File

@@ -122,13 +122,6 @@ pub enum Error {
source: BoxedError,
},
#[snafu(display("Failed to re-compile script due to internal error"))]
CompileScriptInternal {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to create table, table info: {}", table_info))]
CreateTable {
table_info: String,
@@ -343,9 +336,7 @@ impl ErrorExt for Error {
Error::DecodePlan { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
source.status_code()
}
Error::Internal { source, .. } => source.status_code(),
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
Error::Datafusion { error, .. } => datafusion_status_code::<Self>(error, None),

View File

@@ -303,7 +303,7 @@ impl KvBackend for CachedKvBackend {
.lock()
.unwrap()
.as_ref()
.map_or(false, |v| !self.validate_version(*v))
.is_some_and(|v| !self.validate_version(*v))
{
self.cache.invalidate(key).await;
}

View File

@@ -41,6 +41,7 @@ pub mod information_schema {
}
pub mod table_source;
#[async_trait::async_trait]
pub trait CatalogManager: Send + Sync {
fn as_any(&self) -> &dyn Any;

View File

@@ -64,6 +64,7 @@ const INIT_CAPACITY: usize = 42;
/// - `uptime`: the uptime of the peer.
/// - `active_time`: the time since the last activity of the peer.
///
#[derive(Debug)]
pub(super) struct InformationSchemaClusterInfo {
schema: SchemaRef,
catalog_manager: Weak<dyn CatalogManager>,

View File

@@ -45,6 +45,7 @@ use crate::error::{
use crate::information_schema::Predicates;
use crate::CatalogManager;
#[derive(Debug)]
pub(super) struct InformationSchemaColumns {
schema: SchemaRef,
catalog_name: String,

View File

@@ -61,7 +61,7 @@ pub const FLOWNODE_IDS: &str = "flownode_ids";
pub const OPTIONS: &str = "options";
/// The `information_schema.flows` table provides information about flows in databases.
///
#[derive(Debug)]
pub(super) struct InformationSchemaFlows {
schema: SchemaRef,
catalog_name: String,

View File

@@ -58,8 +58,11 @@ pub(crate) const TIME_INDEX_CONSTRAINT_NAME: &str = "TIME INDEX";
pub(crate) const INVERTED_INDEX_CONSTRAINT_NAME: &str = "INVERTED INDEX";
/// Fulltext index constraint name
pub(crate) const FULLTEXT_INDEX_CONSTRAINT_NAME: &str = "FULLTEXT INDEX";
/// Skipping index constraint name
pub(crate) const SKIPPING_INDEX_CONSTRAINT_NAME: &str = "SKIPPING INDEX";
/// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
#[derive(Debug)]
pub(super) struct InformationSchemaKeyColumnUsage {
schema: SchemaRef,
catalog_name: String,
@@ -225,6 +228,12 @@ impl InformationSchemaKeyColumnUsageBuilder {
let keys = &table_info.meta.primary_key_indices;
let schema = table.schema();
// For compatibility, use primary key columns as inverted index columns.
let pk_as_inverted_index = !schema
.column_schemas()
.iter()
.any(|c| c.has_inverted_index_key());
for (idx, column) in schema.column_schemas().iter().enumerate() {
let mut constraints = vec![];
if column.is_time_index() {
@@ -242,14 +251,20 @@ impl InformationSchemaKeyColumnUsageBuilder {
// TODO(dimbtp): foreign key constraint not supported yet
if keys.contains(&idx) {
constraints.push(PRI_CONSTRAINT_NAME);
if pk_as_inverted_index {
constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
}
}
if column.is_inverted_indexed() {
constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
}
if column.has_fulltext_index_key() {
if column.is_fulltext_indexed() {
constraints.push(FULLTEXT_INDEX_CONSTRAINT_NAME);
}
if column.is_skipping_indexed() {
constraints.push(SKIPPING_INDEX_CONSTRAINT_NAME);
}
if !constraints.is_empty() {
let aggregated_constraints = constraints.join(", ");

View File

@@ -59,6 +59,7 @@ const INIT_CAPACITY: usize = 42;
/// The `PARTITIONS` table provides information about partitioned tables.
/// See https://dev.mysql.com/doc/refman/8.0/en/information-schema-partitions-table.html
/// We provide an extra column `greptime_partition_id` for the GreptimeDB region id.
#[derive(Debug)]
pub(super) struct InformationSchemaPartitions {
schema: SchemaRef,
catalog_name: String,

View File

@@ -56,7 +56,7 @@ const INIT_CAPACITY: usize = 42;
/// - `end_time`: the ending execution time of the procedure.
/// - `status`: the status of the procedure.
/// - `lock_keys`: the lock keys of the procedure.
///
#[derive(Debug)]
pub(super) struct InformationSchemaProcedureInfo {
schema: SchemaRef,
catalog_manager: Weak<dyn CatalogManager>,

View File

@@ -59,7 +59,7 @@ const INIT_CAPACITY: usize = 42;
/// - `is_leader`: whether the peer is the leader
/// - `status`: the region status, `ALIVE` or `DOWNGRADED`.
/// - `down_seconds`: the duration of being offline, in seconds.
///
#[derive(Debug)]
pub(super) struct InformationSchemaRegionPeers {
schema: SchemaRef,
catalog_name: String,

View File

@@ -63,7 +63,7 @@ const INIT_CAPACITY: usize = 42;
/// - `index_size`: The sst index files size in bytes.
/// - `engine`: The engine type.
/// - `region_role`: The region role.
///
#[derive(Debug)]
pub(super) struct InformationSchemaRegionStatistics {
schema: SchemaRef,
catalog_manager: Weak<dyn CatalogManager>,

View File

@@ -38,6 +38,7 @@ use store_api::storage::{ScanRequest, TableId};
use super::{InformationTable, RUNTIME_METRICS};
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
#[derive(Debug)]
pub(super) struct InformationSchemaMetrics {
schema: SchemaRef,
}

View File

@@ -49,6 +49,7 @@ pub const SCHEMA_OPTS: &str = "options";
const INIT_CAPACITY: usize = 42;
/// The `information_schema.schemata` table implementation.
#[derive(Debug)]
pub(super) struct InformationSchemaSchemata {
schema: SchemaRef,
catalog_name: String,

View File

@@ -43,6 +43,7 @@ use crate::information_schema::Predicates;
use crate::CatalogManager;
/// The `TABLE_CONSTRAINTS` table describes which tables have constraints.
#[derive(Debug)]
pub(super) struct InformationSchemaTableConstraints {
schema: SchemaRef,
catalog_name: String,

View File

@@ -71,6 +71,7 @@ const TABLE_ID: &str = "table_id";
pub const ENGINE: &str = "engine";
const INIT_CAPACITY: usize = 42;
#[derive(Debug)]
pub(super) struct InformationSchemaTables {
schema: SchemaRef,
catalog_name: String,

View File

@@ -54,6 +54,7 @@ pub const CHARACTER_SET_CLIENT: &str = "character_set_client";
pub const COLLATION_CONNECTION: &str = "collation_connection";
/// The `information_schema.views` table provides information about views in databases.
#[derive(Debug)]
pub(super) struct InformationSchemaViews {
schema: SchemaRef,
catalog_name: String,

View File

@@ -33,6 +33,7 @@ use super::SystemTable;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
/// A memory table with specified schema and columns.
#[derive(Debug)]
pub(crate) struct MemoryTable {
pub(crate) table_id: TableId,
pub(crate) table_name: &'static str,

View File

@@ -14,6 +14,7 @@
mod pg_catalog_memory_table;
mod pg_class;
mod pg_database;
mod pg_namespace;
mod table_names;
@@ -26,6 +27,7 @@ use lazy_static::lazy_static;
use paste::paste;
use pg_catalog_memory_table::get_schema_columns;
use pg_class::PGClass;
use pg_database::PGDatabase;
use pg_namespace::PGNamespace;
use session::context::{Channel, QueryContext};
use table::TableRef;
@@ -113,6 +115,10 @@ impl PGCatalogProvider {
PG_CLASS.to_string(),
self.build_table(PG_CLASS).expect(PG_NAMESPACE),
);
tables.insert(
PG_DATABASE.to_string(),
self.build_table(PG_DATABASE).expect(PG_DATABASE),
);
self.tables = tables;
}
}
@@ -135,6 +141,11 @@ impl SystemSchemaProviderInner for PGCatalogProvider {
self.catalog_manager.clone(),
self.namespace_oid_map.clone(),
))),
table_names::PG_DATABASE => Some(Arc::new(PGDatabase::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
self.namespace_oid_map.clone(),
))),
_ => None,
}
}

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -100,6 +101,15 @@ impl PGClass {
}
}
impl fmt::Debug for PGClass {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PGClass")
.field("schema", &self.schema)
.field("catalog_name", &self.catalog_name)
.finish()
}
}
impl SystemTable for PGClass {
fn table_id(&self) -> table::metadata::TableId {
PG_CATALOG_PG_CLASS_TABLE_ID

View File

@@ -0,0 +1,223 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::PG_CATALOG_PG_DATABASE_TABLE_ID;
use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
use snafu::{OptionExt, ResultExt};
use store_api::storage::ScanRequest;
use super::pg_namespace::oid_map::PGNamespaceOidMapRef;
use super::{query_ctx, OID_COLUMN_NAME, PG_DATABASE};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::Predicates;
use crate::system_schema::utils::tables::{string_column, u32_column};
use crate::system_schema::SystemTable;
use crate::CatalogManager;
// === column name ===
pub const DATNAME: &str = "datname";
/// The initial capacity of the vector builders.
const INIT_CAPACITY: usize = 42;
/// The `pg_catalog.pg_database` table implementation.
pub(super) struct PGDatabase {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
// Workaround to convert schema_name to a numeric id
namespace_oid_map: PGNamespaceOidMapRef,
}
impl std::fmt::Debug for PGDatabase {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("PGDatabase")
.field("schema", &self.schema)
.field("catalog_name", &self.catalog_name)
.finish()
}
}
impl PGDatabase {
pub(super) fn new(
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
namespace_oid_map: PGNamespaceOidMapRef,
) -> Self {
Self {
schema: Self::schema(),
catalog_name,
catalog_manager,
namespace_oid_map,
}
}
fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
u32_column(OID_COLUMN_NAME),
string_column(DATNAME),
]))
}
fn builder(&self) -> PGCDatabaseBuilder {
PGCDatabaseBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
self.catalog_manager.clone(),
self.namespace_oid_map.clone(),
)
}
}
impl DfPartitionStream for PGDatabase {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_database(None)
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}
impl SystemTable for PGDatabase {
fn table_id(&self) -> table::metadata::TableId {
PG_CATALOG_PG_DATABASE_TABLE_ID
}
fn table_name(&self) -> &'static str {
PG_DATABASE
}
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
fn to_stream(
&self,
request: ScanRequest,
) -> Result<common_recordbatch::SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_database(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}
/// Builds the `pg_catalog.pg_database` table row by row.
/// `oid` uses the schema name as a workaround since we don't have a numeric schema id.
/// `datname` is the schema name.
struct PGCDatabaseBuilder {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
namespace_oid_map: PGNamespaceOidMapRef,
oid: UInt32VectorBuilder,
datname: StringVectorBuilder,
}
impl PGCDatabaseBuilder {
fn new(
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
namespace_oid_map: PGNamespaceOidMapRef,
) -> Self {
Self {
schema,
catalog_name,
catalog_manager,
namespace_oid_map,
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
datname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}
async fn make_database(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
let catalog_manager = self
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager
.schema_names(&catalog_name, query_ctx())
.await?
{
self.add_database(&predicates, &schema_name);
}
self.finish()
}
fn add_database(&mut self, predicates: &Predicates, schema_name: &str) {
let oid = self.namespace_oid_map.get_oid(schema_name);
let row: [(&str, &Value); 2] = [
(OID_COLUMN_NAME, &Value::from(oid)),
(DATNAME, &Value::from(schema_name)),
];
if !predicates.eval(&row) {
return;
}
self.oid.push(Some(oid));
self.datname.push(Some(schema_name));
}
fn finish(&mut self) -> Result<RecordBatch> {
let columns: Vec<VectorRef> =
vec![Arc::new(self.oid.finish()), Arc::new(self.datname.finish())];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
}

View File

@@ -17,6 +17,7 @@
pub(super) mod oid_map;
use std::fmt;
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -87,6 +88,15 @@ impl PGNamespace {
}
}
impl fmt::Debug for PGNamespace {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PGNamespace")
.field("schema", &self.schema)
.field("catalog_name", &self.catalog_name)
.finish()
}
}
impl SystemTable for PGNamespace {
fn schema(&self) -> SchemaRef {
self.schema.clone()

View File

@@ -12,7 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub const PG_DATABASE: &str = "pg_databases";
// https://www.postgresql.org/docs/current/catalog-pg-database.html
pub const PG_DATABASE: &str = "pg_database";
// https://www.postgresql.org/docs/current/catalog-pg-namespace.html
pub const PG_NAMESPACE: &str = "pg_namespace";
// https://www.postgresql.org/docs/current/catalog-pg-class.html
pub const PG_CLASS: &str = "pg_class";
// https://www.postgresql.org/docs/current/catalog-pg-type.html
pub const PG_TYPE: &str = "pg_type";

View File

@@ -365,7 +365,7 @@ mod tests {
Projection: person.id AS a, person.name AS b
Filter: person.id > Int32(500)
TableScan: person"#,
format!("\n{:?}", source.get_logical_plan().unwrap())
format!("\n{}", source.get_logical_plan().unwrap())
);
}
}

View File

@@ -15,12 +15,12 @@
//! Dummy catalog for region server.
use std::any::Any;
use std::fmt;
use std::sync::Arc;
use async_trait::async_trait;
use common_catalog::format_full_table_name;
use datafusion::catalog::schema::SchemaProvider;
use datafusion::catalog::{CatalogProvider, CatalogProviderList};
use datafusion::catalog::{CatalogProvider, CatalogProviderList, SchemaProvider};
use datafusion::datasource::TableProvider;
use snafu::OptionExt;
use table::table::adapter::DfTableProviderAdapter;
@@ -41,6 +41,12 @@ impl DummyCatalogList {
}
}
impl fmt::Debug for DummyCatalogList {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DummyCatalogList").finish()
}
}
impl CatalogProviderList for DummyCatalogList {
fn as_any(&self) -> &dyn Any {
self
@@ -91,6 +97,14 @@ impl CatalogProvider for DummyCatalogProvider {
}
}
impl fmt::Debug for DummyCatalogProvider {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DummyCatalogProvider")
.field("catalog_name", &self.catalog_name)
.finish()
}
}
/// A dummy schema provider for [DummyCatalogList].
#[derive(Clone)]
struct DummySchemaProvider {
@@ -127,3 +141,12 @@ impl SchemaProvider for DummySchemaProvider {
true
}
}
impl fmt::Debug for DummySchemaProvider {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DummySchemaProvider")
.field("catalog_name", &self.catalog_name)
.field("schema_name", &self.schema_name)
.finish()
}
}

View File

@@ -4,6 +4,9 @@ version.workspace = true
edition.workspace = true
license.workspace = true
[features]
pg_kvbackend = ["common-meta/pg_kvbackend"]
[lints]
workspace = true
@@ -15,7 +18,7 @@ cache.workspace = true
catalog.workspace = true
chrono.workspace = true
clap.workspace = true
client.workspace = true
client = { workspace = true, features = ["testing"] }
common-base.workspace = true
common-catalog.workspace = true
common-config.workspace = true
@@ -56,8 +59,6 @@ tokio.workspace = true
tracing-appender.workspace = true
[dev-dependencies]
client = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
common-version.workspace = true
serde.workspace = true
tempfile.workspace = true

View File

@@ -22,6 +22,9 @@ use clap::Parser;
use common_error::ext::BoxedError;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::memory::MemoryKvBackend;
#[cfg(feature = "pg_kvbackend")]
use common_meta::kv_backend::postgres::PgStore;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute};
use common_telemetry::info;
@@ -55,18 +58,34 @@ where
#[derive(Debug, Default, Parser)]
pub struct BenchTableMetadataCommand {
#[clap(long)]
etcd_addr: String,
etcd_addr: Option<String>,
#[cfg(feature = "pg_kvbackend")]
#[clap(long)]
postgres_addr: Option<String>,
#[clap(long)]
count: u32,
}
impl BenchTableMetadataCommand {
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr], 128)
.await
.unwrap();
let kv_backend = if let Some(etcd_addr) = &self.etcd_addr {
info!("Using etcd as kv backend");
EtcdStore::with_endpoints([etcd_addr], 128).await.unwrap()
} else {
Arc::new(MemoryKvBackend::new())
};
let table_metadata_manager = Arc::new(TableMetadataManager::new(etcd_store));
#[cfg(feature = "pg_kvbackend")]
let kv_backend = if let Some(postgres_addr) = &self.postgres_addr {
info!("Using postgres as kv backend");
PgStore::with_url(postgres_addr, "greptime_metakv", 128)
.await
.unwrap()
} else {
kv_backend
};
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend));
let tool = BenchTableMetadata {
table_metadata_manager,

View File

@@ -10,9 +10,8 @@ name = "greptime"
path = "src/bin/greptime.rs"
[features]
default = ["python", "servers/pprof", "servers/mem-prof"]
default = ["servers/pprof", "servers/mem-prof"]
tokio-console = ["common-telemetry/tokio-console"]
python = ["frontend/python"]
[lints]
workspace = true
@@ -58,6 +57,7 @@ humantime.workspace = true
lazy_static.workspace = true
meta-client.workspace = true
meta-srv.workspace = true
metric-engine.workspace = true
mito2.workspace = true
moka.workspace = true
nu-ansi-term = "0.46"

View File

@@ -51,8 +51,7 @@ impl App for Instance {
}
async fn start(&mut self) -> Result<()> {
self.start().await.unwrap();
Ok(())
self.start().await
}
fn wait_signal(&self) -> bool {

View File

@@ -62,6 +62,11 @@ impl Instance {
pub fn datanode(&self) -> &Datanode {
&self.datanode
}
/// Allows customizing the datanode for downstream projects.
pub fn datanode_mut(&mut self) -> &mut Datanode {
&mut self.datanode
}
}
#[async_trait]
@@ -271,7 +276,8 @@ impl StartCommand {
info!("Datanode options: {:#?}", opts);
let plugin_opts = opts.plugins;
let opts = opts.component;
let mut opts = opts.component;
opts.grpc.detect_hostname();
let mut plugins = Plugins::new();
plugins::setup_datanode_plugins(&mut plugins, &plugin_opts, &opts)
.await

View File

@@ -345,6 +345,13 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to build wal options allocator"))]
BuildWalOptionsAllocator {
#[snafu(implicit)]
location: Location,
source: common_meta::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -378,7 +385,8 @@ impl ErrorExt for Error {
Error::StartProcedureManager { source, .. }
| Error::StopProcedureManager { source, .. } => source.status_code(),
Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
Error::BuildWalOptionsAllocator { source, .. }
| Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
Error::ReplCreation { .. } | Error::Readline { .. } | Error::HttpQuerySql { .. } => {
StatusCode::Internal
}

View File

@@ -13,6 +13,7 @@
// limitations under the License.
use std::sync::Arc;
use std::time::Duration;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::information_extension::DistributedInformationExtension;
@@ -66,6 +67,11 @@ impl Instance {
pub fn flownode(&self) -> &FlownodeInstance {
&self.flownode
}
/// Allows customizing the flownode for downstream projects.
pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
&mut self.flownode
}
}
#[async_trait::async_trait]
@@ -137,6 +143,11 @@ struct StartCommand {
/// The prefix of environment variables; defaults to `GREPTIMEDB_FLOWNODE`.
#[clap(long, default_value = "GREPTIMEDB_FLOWNODE")]
env_prefix: String,
#[clap(long)]
http_addr: Option<String>,
/// HTTP request timeout in seconds.
#[clap(long)]
http_timeout: Option<u64>,
}
impl StartCommand {
@@ -193,6 +204,14 @@ impl StartCommand {
opts.mode = Mode::Distributed;
}
if let Some(http_addr) = &self.http_addr {
opts.http.addr.clone_from(http_addr);
}
if let Some(http_timeout) = self.http_timeout {
opts.http.timeout = Duration::from_secs(http_timeout);
}
if let (Mode::Distributed, None) = (&opts.mode, &opts.node_id) {
return MissingConfigSnafu {
msg: "Missing node id option",
@@ -217,7 +236,8 @@ impl StartCommand {
info!("Flownode start command: {:#?}", self);
info!("Flownode options: {:#?}", opts);
let opts = opts.component;
let mut opts = opts.component;
opts.grpc.detect_hostname();
// TODO(discord9): make it non-optional after cluster id is required
let cluster_id = opts.cluster_id.unwrap_or(0);

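The new flags follow the usual clap override pattern: optional CLI values are applied on top of the loaded options only when the user actually passed them. A self-contained sketch of that pattern; `StartArgs` and `HttpOptions` here are simplified stand-ins, not the real flownode types.
use std::time::Duration;

use clap::Parser;

#[derive(Parser, Debug)]
struct StartArgs {
    /// HTTP server address.
    #[clap(long)]
    http_addr: Option<String>,
    /// HTTP request timeout in seconds.
    #[clap(long)]
    http_timeout: Option<u64>,
}

#[derive(Debug, Default)]
struct HttpOptions {
    addr: String,
    timeout: Duration,
}

fn apply_overrides(args: &StartArgs, opts: &mut HttpOptions) {
    // Only override what was actually given on the command line.
    if let Some(addr) = &args.http_addr {
        opts.addr.clone_from(addr);
    }
    if let Some(secs) = args.http_timeout {
        opts.timeout = Duration::from_secs(secs);
    }
}
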
View File

@@ -268,7 +268,8 @@ impl StartCommand {
info!("Frontend options: {:#?}", opts);
let plugin_opts = opts.plugins;
let opts = opts.component;
let mut opts = opts.component;
opts.grpc.detect_hostname();
let mut plugins = Plugins::new();
plugins::setup_frontend_plugins(&mut plugins, &plugin_opts, &opts)
.await

View File

@@ -249,8 +249,6 @@ impl StartCommand {
if let Some(backend) = &self.backend {
opts.backend.clone_from(backend);
} else {
opts.backend = BackendImpl::default()
}
// Disable dashboard in metasrv.
@@ -274,7 +272,8 @@ impl StartCommand {
info!("Metasrv options: {:#?}", opts);
let plugin_opts = opts.plugins;
let opts = opts.component;
let mut opts = opts.component;
opts.detect_server_addr();
let mut plugins = Plugins::new();
plugins::setup_metasrv_plugins(&mut plugins, &plugin_opts, &opts)
.await

View File

@@ -22,6 +22,7 @@ use catalog::information_schema::InformationExtension;
use catalog::kvbackend::KvBackendCatalogManager;
use clap::Parser;
use client::api::v1::meta::RegionRole;
use common_base::readable_size::ReadableSize;
use common_base::Plugins;
use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
use common_config::{metadata_store_dir, Configurable, KvBackendConfig};
@@ -42,7 +43,7 @@ use common_meta::node_manager::NodeManagerRef;
use common_meta::peer::Peer;
use common_meta::region_keeper::MemoryRegionKeeper;
use common_meta::sequence::SequenceBuilder;
use common_meta::wal_options_allocator::{WalOptionsAllocator, WalOptionsAllocatorRef};
use common_meta::wal_options_allocator::{build_wal_options_allocator, WalOptionsAllocatorRef};
use common_procedure::{ProcedureInfo, ProcedureManagerRef};
use common_telemetry::info;
use common_telemetry::logging::{LoggingOptions, TracingOptions};
@@ -53,7 +54,7 @@ use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, Sto
use datanode::datanode::{Datanode, DatanodeBuilder};
use datanode::region_server::RegionServer;
use file_engine::config::EngineConfig as FileEngineConfig;
use flow::{FlowWorkerManager, FlownodeBuilder, FrontendInvoker};
use flow::{FlowConfig, FlowWorkerManager, FlownodeBuilder, FlownodeOptions, FrontendInvoker};
use frontend::frontend::FrontendOptions;
use frontend::instance::builder::FrontendBuilder;
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
@@ -75,10 +76,10 @@ use tokio::sync::{broadcast, RwLock};
use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{
BuildCacheRegistrySnafu, CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu,
InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, OtherSnafu, Result,
ShutdownDatanodeSnafu, ShutdownFlownodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu,
StartFlownodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
BuildCacheRegistrySnafu, BuildWalOptionsAllocatorSnafu, CreateDirSnafu, IllegalConfigSnafu,
InitDdlManagerSnafu, InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, OtherSnafu,
Result, ShutdownDatanodeSnafu, ShutdownFlownodeSnafu, ShutdownFrontendSnafu,
StartDatanodeSnafu, StartFlownodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
};
use crate::options::{GlobalOptions, GreptimeOptions};
@@ -144,6 +145,7 @@ pub struct StandaloneOptions {
pub storage: StorageConfig,
pub metadata_store: KvBackendConfig,
pub procedure: ProcedureConfig,
pub flow: FlowConfig,
pub logging: LoggingOptions,
pub user_provider: Option<String>,
/// Options for different store engines.
@@ -152,6 +154,7 @@ pub struct StandaloneOptions {
pub tracing: TracingOptions,
pub init_regions_in_background: bool,
pub init_regions_parallelism: usize,
pub max_in_flight_write_bytes: Option<ReadableSize>,
}
impl Default for StandaloneOptions {
@@ -171,6 +174,7 @@ impl Default for StandaloneOptions {
storage: StorageConfig::default(),
metadata_store: KvBackendConfig::default(),
procedure: ProcedureConfig::default(),
flow: FlowConfig::default(),
logging: LoggingOptions::default(),
export_metrics: ExportMetricsOption::default(),
user_provider: None,
@@ -181,6 +185,7 @@ impl Default for StandaloneOptions {
tracing: TracingOptions::default(),
init_regions_in_background: false,
init_regions_parallelism: 16,
max_in_flight_write_bytes: None,
}
}
}
@@ -218,6 +223,7 @@ impl StandaloneOptions {
user_provider: cloned_opts.user_provider,
// Hand the export metrics task run by standalone over to the frontend for execution
export_metrics: cloned_opts.export_metrics,
max_in_flight_write_bytes: cloned_opts.max_in_flight_write_bytes,
..Default::default()
}
}
@@ -457,7 +463,8 @@ impl StartCommand {
let mut plugins = Plugins::new();
let plugin_opts = opts.plugins;
let opts = opts.component;
let mut opts = opts.component;
opts.grpc.detect_hostname();
let fe_opts = opts.frontend_options();
let dn_opts = opts.datanode_options();
@@ -518,8 +525,12 @@ impl StartCommand {
Self::create_table_metadata_manager(kv_backend.clone()).await?;
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
let flownode_options = FlownodeOptions {
flow: opts.flow.clone(),
..Default::default()
};
let flow_builder = FlownodeBuilder::new(
Default::default(),
flownode_options,
plugins.clone(),
table_metadata_manager.clone(),
catalog_manager.clone(),
@@ -558,10 +569,11 @@ impl StartCommand {
.step(10)
.build(),
);
let wal_options_allocator = Arc::new(WalOptionsAllocator::new(
opts.wal.clone().into(),
kv_backend.clone(),
));
let kafka_options = opts.wal.clone().into();
let wal_options_allocator = build_wal_options_allocator(&kafka_options, kv_backend.clone())
.await
.context(BuildWalOptionsAllocatorSnafu)?;
let wal_options_allocator = Arc::new(wal_options_allocator);
let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
table_id_sequence,
wal_options_allocator.clone(),

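The switch from a plain constructor to the fallible `build_wal_options_allocator` is what makes the new `BuildWalOptionsAllocator` error variant necessary: its snafu context selector wraps the builder's error. A stripped-down sketch of that pattern, where the builder and its `std::io::Error` source are placeholders rather than the real types:
use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to build wal options allocator"))]
    BuildWalOptionsAllocator { source: std::io::Error },
}

// Placeholder for the real async builder; any fallible call works for the sketch.
fn build_allocator() -> Result<u32, std::io::Error> {
    Ok(42)
}

fn main() -> Result<(), Error> {
    // `.context(...)` converts the source error into the CLI error variant.
    let _allocator = build_allocator().context(BuildWalOptionsAllocatorSnafu)?;
    Ok(())
}
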
View File

@@ -25,14 +25,16 @@ use common_telemetry::logging::{LoggingOptions, SlowQueryOptions, DEFAULT_OTLP_E
use common_wal::config::raft_engine::RaftEngineConfig;
use common_wal::config::DatanodeWalConfig;
use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
use file_engine::config::EngineConfig;
use file_engine::config::EngineConfig as FileEngineConfig;
use frontend::frontend::FrontendOptions;
use meta_client::MetaClientOptions;
use meta_srv::metasrv::MetasrvOptions;
use meta_srv::selector::SelectorType;
use metric_engine::config::EngineConfig as MetricEngineConfig;
use mito2::config::MitoConfig;
use servers::export_metrics::ExportMetricsOption;
use servers::grpc::GrpcOptions;
use servers::http::HttpOptions;
#[allow(deprecated)]
#[test]
@@ -69,10 +71,13 @@ fn test_load_datanode_example_config() {
region_engine: vec![
RegionEngineConfig::Mito(MitoConfig {
auto_flush_interval: Duration::from_secs(3600),
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
..Default::default()
}),
RegionEngineConfig::File(EngineConfig {}),
RegionEngineConfig::File(FileEngineConfig {}),
RegionEngineConfig::Metric(MetricEngineConfig {
experimental_sparse_primary_key_encoding: false,
}),
],
logging: LoggingOptions {
level: Some("info".to_string()),
@@ -85,7 +90,9 @@ fn test_load_datanode_example_config() {
remote_write: Some(Default::default()),
..Default::default()
},
grpc: GrpcOptions::default().with_addr("127.0.0.1:3001"),
grpc: GrpcOptions::default()
.with_addr("127.0.0.1:3001")
.with_hostname("127.0.0.1:3001"),
rpc_addr: Some("127.0.0.1:3001".to_string()),
rpc_hostname: Some("127.0.0.1".to_string()),
rpc_runtime_size: Some(8),
@@ -137,6 +144,11 @@ fn test_load_frontend_example_config() {
remote_write: Some(Default::default()),
..Default::default()
},
grpc: GrpcOptions::default().with_hostname("127.0.0.1:4001"),
http: HttpOptions {
cors_allowed_origins: vec!["https://example.com".to_string()],
..Default::default()
},
..Default::default()
},
..Default::default()
@@ -154,6 +166,7 @@ fn test_load_metasrv_example_config() {
component: MetasrvOptions {
selector: SelectorType::default(),
data_home: "/tmp/metasrv/".to_string(),
server_addr: "127.0.0.1:3002".to_string(),
logging: LoggingOptions {
dir: "/tmp/greptimedb/logs".to_string(),
level: Some("info".to_string()),
@@ -203,10 +216,13 @@ fn test_load_standalone_example_config() {
region_engine: vec![
RegionEngineConfig::Mito(MitoConfig {
auto_flush_interval: Duration::from_secs(3600),
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
..Default::default()
}),
RegionEngineConfig::File(EngineConfig {}),
RegionEngineConfig::File(FileEngineConfig {}),
RegionEngineConfig::Metric(MetricEngineConfig {
experimental_sparse_primary_key_encoding: false,
}),
],
storage: StorageConfig {
data_home: "/tmp/greptimedb/".to_string(),
@@ -223,6 +239,10 @@ fn test_load_standalone_example_config() {
remote_write: Some(Default::default()),
..Default::default()
},
http: HttpOptions {
cors_allowed_origins: vec!["https://example.com".to_string()],
..Default::default()
},
..Default::default()
},
..Default::default()

View File

@@ -4,6 +4,9 @@ version.workspace = true
edition.workspace = true
license.workspace = true
[features]
testing = []
[lints]
workspace = true

View File

@@ -17,6 +17,7 @@ use std::io;
use std::ops::Range;
use std::path::Path;
use std::pin::Pin;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};
@@ -33,19 +34,22 @@ pub struct Metadata {
pub content_length: u64,
}
/// `RangeReader` reads a range of bytes from a source.
#[async_trait]
pub trait RangeReader: Send + Unpin {
/// `SizeAwareRangeReader` is a `RangeReader` that supports setting a file size hint.
pub trait SizeAwareRangeReader: RangeReader {
/// Sets the file size hint for the reader.
///
/// It's used to optimize the reading process by reducing the number of remote requests.
fn with_file_size_hint(&mut self, file_size_hint: u64);
}
/// `RangeReader` reads a range of bytes from a source.
#[async_trait]
pub trait RangeReader: Sync + Send + Unpin {
/// Returns the metadata of the source.
async fn metadata(&mut self) -> io::Result<Metadata>;
async fn metadata(&self) -> io::Result<Metadata>;
/// Reads the bytes in the given range.
async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes>;
async fn read(&self, range: Range<u64>) -> io::Result<Bytes>;
/// Reads the bytes in the given range into the buffer.
///
@@ -53,18 +57,14 @@ pub trait RangeReader: Send + Unpin {
/// - If the buffer is insufficient to hold the bytes, it will either:
/// - Allocate additional space (e.g., for `Vec<u8>`)
/// - Panic (e.g., for `&mut [u8]`)
async fn read_into(
&mut self,
range: Range<u64>,
buf: &mut (impl BufMut + Send),
) -> io::Result<()> {
async fn read_into(&self, range: Range<u64>, buf: &mut (impl BufMut + Send)) -> io::Result<()> {
let bytes = self.read(range).await?;
buf.put_slice(&bytes);
Ok(())
}
/// Reads the bytes in the given ranges.
async fn read_vec(&mut self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
async fn read_vec(&self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
let mut result = Vec::with_capacity(ranges.len());
for range in ranges {
result.push(self.read(range.clone()).await?);
@@ -74,25 +74,20 @@ pub trait RangeReader: Send + Unpin {
}
#[async_trait]
impl<R: ?Sized + RangeReader> RangeReader for &mut R {
fn with_file_size_hint(&mut self, file_size_hint: u64) {
(*self).with_file_size_hint(file_size_hint)
}
async fn metadata(&mut self) -> io::Result<Metadata> {
impl<R: ?Sized + RangeReader> RangeReader for &R {
async fn metadata(&self) -> io::Result<Metadata> {
(*self).metadata().await
}
async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
async fn read(&self, range: Range<u64>) -> io::Result<Bytes> {
(*self).read(range).await
}
async fn read_into(
&mut self,
range: Range<u64>,
buf: &mut (impl BufMut + Send),
) -> io::Result<()> {
async fn read_into(&self, range: Range<u64>, buf: &mut (impl BufMut + Send)) -> io::Result<()> {
(*self).read_into(range, buf).await
}
async fn read_vec(&mut self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
async fn read_vec(&self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
(*self).read_vec(ranges).await
}
}
@@ -120,7 +115,7 @@ pub struct AsyncReadAdapter<R> {
impl<R: RangeReader + 'static> AsyncReadAdapter<R> {
pub async fn new(inner: R) -> io::Result<Self> {
let mut inner = inner;
let inner = inner;
let metadata = inner.metadata().await?;
Ok(AsyncReadAdapter {
inner: Arc::new(Mutex::new(inner)),
@@ -160,7 +155,7 @@ impl<R: RangeReader + 'static> AsyncRead for AsyncReadAdapter<R> {
let range = *this.position..(*this.position + size);
let inner = this.inner.clone();
let fut = async move {
let mut inner = inner.lock().await;
let inner = inner.lock().await;
inner.read(range).await
};
@@ -195,27 +190,24 @@ impl<R: RangeReader + 'static> AsyncRead for AsyncReadAdapter<R> {
#[async_trait]
impl RangeReader for Vec<u8> {
fn with_file_size_hint(&mut self, _file_size_hint: u64) {
// do nothing
}
async fn metadata(&mut self) -> io::Result<Metadata> {
async fn metadata(&self) -> io::Result<Metadata> {
Ok(Metadata {
content_length: self.len() as u64,
})
}
async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
async fn read(&self, range: Range<u64>) -> io::Result<Bytes> {
let bytes = Bytes::copy_from_slice(&self[range.start as usize..range.end as usize]);
Ok(bytes)
}
}
// TODO(weny): consider replacing `tokio::fs::File` with opendal reader.
/// `FileReader` is a `RangeReader` for reading a file.
pub struct FileReader {
content_length: u64,
position: u64,
file: tokio::fs::File,
position: AtomicU64,
file: Mutex<tokio::fs::File>,
}
impl FileReader {
@@ -225,32 +217,36 @@ impl FileReader {
let metadata = file.metadata().await?;
Ok(FileReader {
content_length: metadata.len(),
position: 0,
file,
position: AtomicU64::new(0),
file: Mutex::new(file),
})
}
}
impl SizeAwareRangeReader for FileReader {
fn with_file_size_hint(&mut self, _file_size_hint: u64) {
// do nothing
}
}
#[async_trait]
impl RangeReader for FileReader {
fn with_file_size_hint(&mut self, _file_size_hint: u64) {
// do nothing
}
async fn metadata(&mut self) -> io::Result<Metadata> {
async fn metadata(&self) -> io::Result<Metadata> {
Ok(Metadata {
content_length: self.content_length,
})
}
async fn read(&mut self, mut range: Range<u64>) -> io::Result<Bytes> {
if range.start != self.position {
self.file.seek(io::SeekFrom::Start(range.start)).await?;
self.position = range.start;
async fn read(&self, mut range: Range<u64>) -> io::Result<Bytes> {
let mut file = self.file.lock().await;
if range.start != self.position.load(Ordering::Relaxed) {
file.seek(io::SeekFrom::Start(range.start)).await?;
self.position.store(range.start, Ordering::Relaxed);
}
range.end = range.end.min(self.content_length);
if range.end <= self.position {
if range.end <= self.position.load(Ordering::Relaxed) {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"Start of range is out of bounds",
@@ -259,8 +255,8 @@ impl RangeReader for FileReader {
let mut buf = vec![0; (range.end - range.start) as usize];
self.file.read_exact(&mut buf).await?;
self.position = range.end;
file.read_exact(&mut buf).await?;
self.position.store(range.end, Ordering::Relaxed);
Ok(Bytes::from(buf))
}
@@ -301,7 +297,7 @@ mod tests {
let data = b"hello world";
tokio::fs::write(path, data).await.unwrap();
let mut reader = FileReader::new(path).await.unwrap();
let reader = FileReader::new(path).await.unwrap();
let metadata = reader.metadata().await.unwrap();
assert_eq!(metadata.content_length, data.len() as u64);

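The trait now takes `&self` everywhere, so stateful readers keep their mutable parts behind interior mutability, as `FileReader` does with an `AtomicU64` position and a `Mutex` around the file handle. A simplified, self-contained sketch of that pattern, with an in-memory buffer standing in for the file:
use std::sync::atomic::{AtomicU64, Ordering};

use tokio::sync::Mutex;

struct SharedReader {
    position: AtomicU64,
    data: Mutex<Vec<u8>>, // stands in for the `tokio::fs::File` handle
}

impl SharedReader {
    async fn read(&self, start: u64, end: u64) -> Vec<u8> {
        // Lock the underlying source only for the duration of the read.
        let data = self.data.lock().await;
        // Track where the read ended, mirroring FileReader's position bookkeeping.
        self.position.store(end, Ordering::Relaxed);
        data[start as usize..end as usize].to_vec()
    }
}

#[tokio::main]
async fn main() {
    let reader = SharedReader {
        position: AtomicU64::new(0),
        data: Mutex::new(b"hello world".to_vec()),
    };
    // `read` needs only `&self`, so the reader can be shared freely.
    assert_eq!(reader.read(0, 5).await, b"hello".to_vec());
}
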
View File

@@ -109,6 +109,7 @@ pub const INFORMATION_SCHEMA_REGION_STATISTICS_TABLE_ID: u32 = 35;
pub const PG_CATALOG_PG_CLASS_TABLE_ID: u32 = 256;
pub const PG_CATALOG_PG_TYPE_TABLE_ID: u32 = 257;
pub const PG_CATALOG_PG_NAMESPACE_TABLE_ID: u32 = 258;
pub const PG_CATALOG_PG_DATABASE_TABLE_ID: u32 = 259;
// ----- End of pg_catalog tables -----

View File

@@ -73,14 +73,21 @@ pub trait Configurable: Serialize + DeserializeOwned + Default + Sized {
layered_config = layered_config.add_source(File::new(config_file, FileFormat::Toml));
}
let opts = layered_config
let mut opts: Self = layered_config
.build()
.and_then(|x| x.try_deserialize())
.context(LoadLayeredConfigSnafu)?;
opts.validate_sanitize()?;
Ok(opts)
}
/// Validate (and possibly sanitize) the configuration.
fn validate_sanitize(&mut self) -> Result<()> {
Ok(())
}
/// List of toml keys that should be parsed as a list.
fn env_list_keys() -> Option<&'static [&'static str]> {
None

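The new hook gives each options type a chance to reject or normalize values right after the layered config is deserialized. A simplified stand-in trait illustrating the idea; the real `Configurable` lives in common-config and returns its own `Result` type:
trait Configurable: Default {
    /// Validate (and possibly sanitize) the configuration in place.
    fn validate_sanitize(&mut self) -> Result<(), String> {
        Ok(())
    }
}

#[derive(Default)]
struct MyOptions {
    http_addr: String,
}

impl Configurable for MyOptions {
    fn validate_sanitize(&mut self) -> Result<(), String> {
        if self.http_addr.is_empty() {
            // Normalize instead of failing: fall back to a default bind address.
            self.http_addr = "127.0.0.1:4000".to_string();
        }
        Ok(())
    }
}

fn main() {
    let mut opts = MyOptions::default();
    opts.validate_sanitize().unwrap();
    assert_eq!(opts.http_addr, "127.0.0.1:4000");
}
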
View File

@@ -31,7 +31,7 @@ derive_builder.workspace = true
futures.workspace = true
lazy_static.workspace = true
object-store.workspace = true
orc-rust = { git = "https://github.com/datafusion-contrib/datafusion-orc.git", rev = "502217315726314c4008808fe169764529640599", default-features = false, features = [
orc-rust = { version = "0.5", default-features = false, features = [
"async",
] }
parquet.workspace = true

View File

@@ -180,7 +180,7 @@ pub enum Error {
#[snafu(display("Failed to parse format {} with value: {}", key, value))]
ParseFormat {
key: &'static str,
key: String,
value: String,
#[snafu(implicit)]
location: Location,

View File

@@ -126,8 +126,7 @@ impl ArrowDecoder for arrow::csv::reader::Decoder {
}
}
#[allow(deprecated)]
impl ArrowDecoder for arrow::json::RawDecoder {
impl ArrowDecoder for arrow::json::reader::Decoder {
fn decode(&mut self, buf: &[u8]) -> result::Result<usize, ArrowError> {
self.decode(buf)
}

View File

@@ -17,8 +17,7 @@ use std::str::FromStr;
use std::sync::Arc;
use arrow::csv;
#[allow(deprecated)]
use arrow::csv::reader::infer_reader_schema as infer_csv_schema;
use arrow::csv::reader::Format;
use arrow::record_batch::RecordBatch;
use arrow_schema::{Schema, SchemaRef};
use async_trait::async_trait;
@@ -161,7 +160,6 @@ impl FileOpener for CsvOpener {
}
}
#[allow(deprecated)]
#[async_trait]
impl FileFormat for CsvFormat {
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
@@ -188,9 +186,12 @@ impl FileFormat for CsvFormat {
common_runtime::spawn_blocking_global(move || {
let reader = SyncIoBridge::new(decoded);
let (schema, _records_read) =
infer_csv_schema(reader, delimiter, schema_infer_max_record, has_header)
.context(error::InferSchemaSnafu)?;
let format = Format::default()
.with_delimiter(delimiter)
.with_header(has_header);
let (schema, _records_read) = format
.infer_schema(reader, schema_infer_max_record)
.context(error::InferSchemaSnafu)?;
Ok(schema)
})
.await
@@ -253,7 +254,7 @@ mod tests {
"c7: Int64: NULL",
"c8: Int64: NULL",
"c9: Int64: NULL",
"c10: Int64: NULL",
"c10: Utf8: NULL",
"c11: Float64: NULL",
"c12: Float64: NULL",
"c13: Utf8: NULL"

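A sketch of the non-deprecated inference path used above: arrow's csv `Format` builder replaces the removed `infer_reader_schema` free function. The sample data, delimiter, and record limit below are arbitrary example values.
use std::io::Cursor;

use arrow::csv::reader::Format;
use arrow::error::ArrowError;

fn main() -> Result<(), ArrowError> {
    let data = "a,b\n1,x\n2,y\n";
    let format = Format::default().with_delimiter(b',').with_header(true);
    // `None` means there is no cap on the number of records scanned for inference.
    let (schema, _records_read) = format.infer_schema(Cursor::new(data), None)?;
    assert_eq!(schema.fields().len(), 2);
    Ok(())
}
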
View File

@@ -20,8 +20,7 @@ use std::sync::Arc;
use arrow::datatypes::SchemaRef;
use arrow::json::reader::{infer_json_schema_from_iterator, ValueIter};
use arrow::json::writer::LineDelimited;
#[allow(deprecated)]
use arrow::json::{self, RawReaderBuilder};
use arrow::json::{self, ReaderBuilder};
use arrow::record_batch::RecordBatch;
use arrow_schema::Schema;
use async_trait::async_trait;
@@ -140,7 +139,6 @@ impl JsonOpener {
}
}
#[allow(deprecated)]
impl FileOpener for JsonOpener {
fn open(&self, meta: FileMeta) -> DataFusionResult<FileOpenFuture> {
open_with_decoder(
@@ -148,7 +146,7 @@ impl FileOpener for JsonOpener {
meta.location().to_string(),
self.compression_type,
|| {
RawReaderBuilder::new(self.projected_schema.clone())
ReaderBuilder::new(self.projected_schema.clone())
.with_batch_size(self.batch_size)
.build_decoder()
.map_err(DataFusionError::from)

View File

@@ -42,7 +42,7 @@ struct Test<'a, T: FileOpener> {
expected: Vec<&'a str>,
}
impl<'a, T: FileOpener> Test<'a, T> {
impl<T: FileOpener> Test<'_, T> {
pub async fn run(self) {
let result = FileStream::new(
&self.config,

View File

@@ -35,10 +35,23 @@ data = {
"bigint_other": [5, -5, 1, 5, 5],
"utf8_increase": ["a", "bb", "ccc", "dddd", "eeeee"],
"utf8_decrease": ["eeeee", "dddd", "ccc", "bb", "a"],
"timestamp_simple": [datetime.datetime(2023, 4, 1, 20, 15, 30, 2000), datetime.datetime.fromtimestamp(int('1629617204525777000')/1000000000), datetime.datetime(2023, 1, 1), datetime.datetime(2023, 2, 1), datetime.datetime(2023, 3, 1)],
"date_simple": [datetime.date(2023, 4, 1), datetime.date(2023, 3, 1), datetime.date(2023, 1, 1), datetime.date(2023, 2, 1), datetime.date(2023, 3, 1)]
"timestamp_simple": [
datetime.datetime(2023, 4, 1, 20, 15, 30, 2000),
datetime.datetime.fromtimestamp(int("1629617204525777000") / 1000000000),
datetime.datetime(2023, 1, 1),
datetime.datetime(2023, 2, 1),
datetime.datetime(2023, 3, 1),
],
"date_simple": [
datetime.date(2023, 4, 1),
datetime.date(2023, 3, 1),
datetime.date(2023, 1, 1),
datetime.date(2023, 2, 1),
datetime.date(2023, 3, 1),
],
}
def infer_schema(data):
schema = "struct<"
for key, value in data.items():
@@ -56,7 +69,7 @@ def infer_schema(data):
elif key.startswith("date"):
dt = "date"
else:
print(key,value,dt)
print(key, value, dt)
raise NotImplementedError
if key.startswith("double"):
dt = "double"
@@ -68,7 +81,6 @@ def infer_schema(data):
return schema
def _write(
schema: str,
data,

View File

@@ -164,7 +164,7 @@ impl FromStr for Decimal128 {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let len = s.as_bytes().len();
let len = s.len();
if len <= BYTES_TO_OVERFLOW_RUST_DECIMAL {
let rd = RustDecimal::from_str_exact(s).context(ParseRustDecimalStrSnafu { raw: s })?;
Ok(Self::from(rd))

View File

@@ -8,6 +8,7 @@ license.workspace = true
workspace = true
[dependencies]
http.workspace = true
snafu.workspace = true
strum.workspace = true
tonic.workspace = true

View File

@@ -18,9 +18,30 @@ pub mod ext;
pub mod mock;
pub mod status_code;
use http::{HeaderMap, HeaderValue};
pub use snafu;
// HACK - these headers are defined here so they can be shared by gRPC services. For common
// HTTP headers, please define them in `src/servers/src/http/header.rs`.
pub const GREPTIME_DB_HEADER_ERROR_CODE: &str = "x-greptime-err-code";
pub const GREPTIME_DB_HEADER_ERROR_MSG: &str = "x-greptime-err-msg";
/// Creates an HTTP header map from an error code and message,
/// using `GREPTIME_DB_HEADER_ERROR_CODE` and `GREPTIME_DB_HEADER_ERROR_MSG` as the keys.
pub fn from_err_code_msg_to_header(code: u32, msg: &str) -> HeaderMap {
let mut header = HeaderMap::new();
let msg = HeaderValue::from_str(msg).unwrap_or_else(|_| {
HeaderValue::from_bytes(
&msg.as_bytes()
.iter()
.flat_map(|b| std::ascii::escape_default(*b))
.collect::<Vec<u8>>(),
)
.expect("Already escaped string should be valid ascii")
});
header.insert(GREPTIME_DB_HEADER_ERROR_CODE, code.into());
header.insert(GREPTIME_DB_HEADER_ERROR_MSG, msg);
header
}
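A usage sketch for the helper above: with a non-ASCII message, `HeaderValue::from_str` fails and the escaped fallback keeps the header value valid ASCII. It assumes the constants and function defined in this file are in scope; the error code `1004` and the message are arbitrary example values.
#[test]
fn non_ascii_error_message_is_escaped() {
    let headers = from_err_code_msg_to_header(1004, "无效的参数");
    assert!(headers.get(GREPTIME_DB_HEADER_ERROR_CODE).is_some());
    let msg = headers.get(GREPTIME_DB_HEADER_ERROR_MSG).unwrap();
    // Every non-ASCII byte was rewritten as a `\xHH` escape sequence.
    assert!(msg.to_str().unwrap().is_ascii());
}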

View File

@@ -33,7 +33,7 @@ geo-types = { version = "0.7", optional = true }
geohash = { version = "0.13", optional = true }
h3o = { version = "0.6", optional = true }
jsonb.workspace = true
nalgebra = "0.33"
nalgebra.workspace = true
num = "0.4"
num-traits = "0.2"
once_cell.workspace = true

View File

@@ -25,7 +25,6 @@ use crate::scalars::expression::ExpressionFunction;
use crate::scalars::json::JsonFunction;
use crate::scalars::matches::MatchesFunction;
use crate::scalars::math::MathFunction;
use crate::scalars::numpy::NumpyFunction;
use crate::scalars::timestamp::TimestampFunction;
use crate::scalars::vector::VectorFunction;
use crate::system::SystemFunction;
@@ -103,7 +102,6 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
// Utility functions
MathFunction::register(&function_registry);
NumpyFunction::register(&function_registry);
TimestampFunction::register(&function_registry);
DateFunction::register(&function_registry);
ExpressionFunction::register(&function_registry);

View File

@@ -20,7 +20,6 @@ pub mod geo;
pub mod json;
pub mod matches;
pub mod math;
pub mod numpy;
pub mod vector;
#[cfg(test)]

View File

@@ -32,6 +32,8 @@ pub use scipy_stats_norm_cdf::ScipyStatsNormCdfAccumulatorCreator;
pub use scipy_stats_norm_pdf::ScipyStatsNormPdfAccumulatorCreator;
use crate::function_registry::FunctionRegistry;
use crate::scalars::vector::product::VectorProductCreator;
use crate::scalars::vector::sum::VectorSumCreator;
/// A function that creates an `AggregateFunctionCreator`.
/// "Aggregator" *is* AggregatorFunction. Since the latter is long, we named a short alias for it.
@@ -91,6 +93,8 @@ impl AggregateFunctions {
register_aggr_func!("argmin", 1, ArgminAccumulatorCreator);
register_aggr_func!("scipystatsnormcdf", 2, ScipyStatsNormCdfAccumulatorCreator);
register_aggr_func!("scipystatsnormpdf", 2, ScipyStatsNormPdfAccumulatorCreator);
register_aggr_func!("vec_sum", 1, VectorSumCreator);
register_aggr_func!("vec_product", 1, VectorProductCreator);
#[cfg(feature = "geo")]
register_aggr_func!(

View File

@@ -91,6 +91,7 @@ mod tests {
use std::sync::Arc;
use common_query::prelude::{TypeSignature, Volatility};
use datatypes::arrow::datatypes::IntervalDayTime;
use datatypes::prelude::ConcreteDataType;
use datatypes::value::Value;
use datatypes::vectors::{
@@ -134,7 +135,12 @@ mod tests {
let times = vec![Some(123), None, Some(42), None];
// Intervals in milliseconds
let intervals = vec![1000, 2000, 3000, 1000];
let intervals = vec![
IntervalDayTime::new(0, 1000),
IntervalDayTime::new(0, 2000),
IntervalDayTime::new(0, 3000),
IntervalDayTime::new(0, 1000),
];
let results = [Some(124), None, Some(45), None];
let time_vector = TimestampSecondVector::from(times.clone());

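For context on the test change: arrow's `IntervalDayTime` is now constructed from explicit (days, milliseconds) components instead of a raw integer, so a 2-second interval is written as below. A minimal sketch using arrow directly:
use arrow::datatypes::IntervalDayTime;

fn main() {
    let two_seconds = IntervalDayTime::new(0, 2000);
    assert_eq!(two_seconds.days, 0);
    assert_eq!(two_seconds.milliseconds, 2000);
}
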
View File

@@ -91,6 +91,7 @@ mod tests {
use std::sync::Arc;
use common_query::prelude::{TypeSignature, Volatility};
use datatypes::arrow::datatypes::IntervalDayTime;
use datatypes::prelude::ConcreteDataType;
use datatypes::value::Value;
use datatypes::vectors::{
@@ -139,7 +140,12 @@ mod tests {
let times = vec![Some(123), None, Some(42), None];
// Intervals in milliseconds
let intervals = vec![1000, 2000, 3000, 1000];
let intervals = vec![
IntervalDayTime::new(0, 1000),
IntervalDayTime::new(0, 2000),
IntervalDayTime::new(0, 3000),
IntervalDayTime::new(0, 1000),
];
let results = [Some(122), None, Some(39), None];
let time_vector = TimestampSecondVector::from(times.clone());

View File

@@ -21,10 +21,9 @@ use common_query::error::{
};
use datafusion::common::tree_node::{Transformed, TreeNode, TreeNodeIterator, TreeNodeRecursion};
use datafusion::common::{DFSchema, Result as DfResult};
use datafusion::execution::context::SessionState;
use datafusion::execution::SessionStateBuilder;
use datafusion::logical_expr::{self, Expr, Volatility};
use datafusion::physical_planner::{DefaultPhysicalPlanner, PhysicalPlanner};
use datafusion::prelude::SessionConfig;
use datatypes::arrow::array::RecordBatch;
use datatypes::arrow::datatypes::{DataType, Field};
use datatypes::prelude::VectorRef;
@@ -104,8 +103,7 @@ impl MatchesFunction {
let like_expr = ast.into_like_expr(col_name);
let input_schema = Self::input_schema();
let session_state =
SessionState::new_with_config_rt(SessionConfig::default(), Arc::default());
let session_state = SessionStateBuilder::new().with_default_features().build();
let planner = DefaultPhysicalPlanner::default();
let physical_expr = planner
.create_physical_expr(&like_expr, &input_schema, &session_state)
@@ -131,7 +129,7 @@ impl MatchesFunction {
}
fn input_schema() -> DFSchema {
DFSchema::from_unqualifed_fields(
DFSchema::from_unqualified_fields(
[Arc::new(Field::new("data", DataType::Utf8, true))].into(),
HashMap::new(),
)
@@ -725,7 +723,8 @@ struct Tokenizer {
impl Tokenizer {
pub fn tokenize(mut self, pattern: &str) -> Result<Vec<Token>> {
let mut tokens = vec![];
while self.cursor < pattern.len() {
let char_len = pattern.chars().count();
while self.cursor < char_len {
// TODO: collect pattern into Vec<char> if this tokenizer becomes a bottleneck in the future
let c = pattern.chars().nth(self.cursor).unwrap();
match c {
@@ -794,7 +793,8 @@ impl Tokenizer {
let mut phase = String::new();
let mut is_quote_present = false;
while self.cursor < pattern.len() {
let char_len = pattern.chars().count();
while self.cursor < char_len {
let mut c = pattern.chars().nth(self.cursor).unwrap();
match c {
@@ -899,6 +899,26 @@ mod test {
Phase("c".to_string()),
],
),
(
r#"中文 测试"#,
vec![Phase("中文".to_string()), Phase("测试".to_string())],
),
(
r#"中文 AND 测试"#,
vec![Phase("中文".to_string()), And, Phase("测试".to_string())],
),
(
r#"中文 +测试"#,
vec![Phase("中文".to_string()), Must, Phase("测试".to_string())],
),
(
r#"中文 -测试"#,
vec![
Phase("中文".to_string()),
Negative,
Phase("测试".to_string()),
],
),
];
for (query, expected) in cases {
@@ -1030,6 +1050,61 @@ mod test {
],
},
),
(
r#"中文 测试"#,
PatternAst::Binary {
op: BinaryOp::Or,
children: vec![
PatternAst::Literal {
op: UnaryOp::Optional,
pattern: "中文".to_string(),
},
PatternAst::Literal {
op: UnaryOp::Optional,
pattern: "测试".to_string(),
},
],
},
),
(
r#"中文 AND 测试"#,
PatternAst::Binary {
op: BinaryOp::And,
children: vec![
PatternAst::Literal {
op: UnaryOp::Optional,
pattern: "中文".to_string(),
},
PatternAst::Literal {
op: UnaryOp::Optional,
pattern: "测试".to_string(),
},
],
},
),
(
r#"中文 +测试"#,
PatternAst::Literal {
op: UnaryOp::Must,
pattern: "测试".to_string(),
},
),
(
r#"中文 -测试"#,
PatternAst::Binary {
op: BinaryOp::And,
children: vec![
PatternAst::Literal {
op: UnaryOp::Negative,
pattern: "测试".to_string(),
},
PatternAst::Literal {
op: UnaryOp::Optional,
pattern: "中文".to_string(),
},
],
},
),
];
for (query, expected) in cases {

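The tokenizer fix above matters for multi-byte input: `str::len()` counts bytes while the cursor walks characters, so the loop bound must use the character count. A tiny self-contained illustration:
fn main() {
    let pattern = "中文 测试";
    assert_eq!(pattern.len(), 13); // bytes: three per CJK character plus one space
    assert_eq!(pattern.chars().count(), 5); // characters the cursor actually visits
    // With a byte-based bound the cursor runs past the last character:
    assert!(pattern.chars().nth(5).is_none());
}
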
View File

@@ -1,30 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod clip;
mod interp;
use std::sync::Arc;
use clip::ClipFunction;
use crate::function_registry::FunctionRegistry;
pub(crate) struct NumpyFunction;
impl NumpyFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(ClipFunction));
}
}

View File

@@ -1,298 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use std::sync::Arc;
use common_query::error::Result;
use common_query::prelude::{Signature, Volatility};
use datatypes::arrow::compute;
use datatypes::arrow::datatypes::ArrowPrimitiveType;
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::*;
use datatypes::vectors::PrimitiveVector;
use paste::paste;
use crate::function::{Function, FunctionContext};
use crate::scalars::expression::{scalar_binary_op, EvalContext};
/// numpy.clip function, <https://numpy.org/doc/stable/reference/generated/numpy.clip.html>
#[derive(Clone, Debug, Default)]
pub struct ClipFunction;
macro_rules! define_eval {
($O: ident) => {
paste! {
fn [<eval_ $O>](columns: &[VectorRef]) -> Result<VectorRef> {
fn cast_vector(input: &VectorRef) -> VectorRef {
Arc::new(PrimitiveVector::<<$O as WrapperType>::LogicalType>::try_from_arrow_array(
compute::cast(&input.to_arrow_array(), &<<<$O as WrapperType>::LogicalType as LogicalPrimitiveType>::ArrowPrimitive as ArrowPrimitiveType>::DATA_TYPE).unwrap()
).unwrap()) as _
}
let operator_1 = cast_vector(&columns[0]);
let operator_2 = cast_vector(&columns[1]);
let operator_3 = cast_vector(&columns[2]);
// clip(a, min, max) is equals to min(max(a, min), max)
let col: VectorRef = Arc::new(scalar_binary_op::<$O, $O, $O, _>(
&operator_1,
&operator_2,
scalar_max,
&mut EvalContext::default(),
)?);
let col = scalar_binary_op::<$O, $O, $O, _>(
&col,
&operator_3,
scalar_min,
&mut EvalContext::default(),
)?;
Ok(Arc::new(col))
}
}
};
}
define_eval!(i64);
define_eval!(u64);
define_eval!(f64);
impl Function for ClipFunction {
fn name(&self) -> &str {
"clip"
}
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
if input_types.iter().all(ConcreteDataType::is_signed) {
Ok(ConcreteDataType::int64_datatype())
} else if input_types.iter().all(ConcreteDataType::is_unsigned) {
Ok(ConcreteDataType::uint64_datatype())
} else {
Ok(ConcreteDataType::float64_datatype())
}
}
fn signature(&self) -> Signature {
Signature::uniform(3, ConcreteDataType::numerics(), Volatility::Immutable)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
if columns.iter().all(|v| v.data_type().is_signed()) {
eval_i64(columns)
} else if columns.iter().all(|v| v.data_type().is_unsigned()) {
eval_u64(columns)
} else {
eval_f64(columns)
}
}
}
#[inline]
pub fn min<T: PartialOrd>(input: T, min: T) -> T {
if input < min {
input
} else {
min
}
}
#[inline]
pub fn max<T: PartialOrd>(input: T, max: T) -> T {
if input > max {
input
} else {
max
}
}
#[inline]
fn scalar_min<O>(left: Option<O>, right: Option<O>, _ctx: &mut EvalContext) -> Option<O>
where
O: Scalar + Copy + PartialOrd,
{
match (left, right) {
(Some(left), Some(right)) => Some(min(left, right)),
_ => None,
}
}
#[inline]
fn scalar_max<O>(left: Option<O>, right: Option<O>, _ctx: &mut EvalContext) -> Option<O>
where
O: Scalar + Copy + PartialOrd,
{
match (left, right) {
(Some(left), Some(right)) => Some(max(left, right)),
_ => None,
}
}
impl fmt::Display for ClipFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "CLIP")
}
}
#[cfg(test)]
mod tests {
use common_query::prelude::TypeSignature;
use datatypes::value::Value;
use datatypes::vectors::{
ConstantVector, Float32Vector, Int16Vector, Int32Vector, Int8Vector, UInt16Vector,
UInt32Vector, UInt8Vector,
};
use super::*;
#[test]
fn test_clip_signature() {
let clip = ClipFunction;
assert_eq!("clip", clip.name());
assert_eq!(
ConcreteDataType::int64_datatype(),
clip.return_type(&[]).unwrap()
);
assert_eq!(
ConcreteDataType::int64_datatype(),
clip.return_type(&[
ConcreteDataType::int16_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::int8_datatype()
])
.unwrap()
);
assert_eq!(
ConcreteDataType::uint64_datatype(),
clip.return_type(&[
ConcreteDataType::uint16_datatype(),
ConcreteDataType::uint64_datatype(),
ConcreteDataType::uint8_datatype()
])
.unwrap()
);
assert_eq!(
ConcreteDataType::float64_datatype(),
clip.return_type(&[
ConcreteDataType::uint16_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::uint8_datatype()
])
.unwrap()
);
assert!(matches!(clip.signature(),
Signature {
type_signature: TypeSignature::Uniform(3, valid_types),
volatility: Volatility::Immutable
} if valid_types == ConcreteDataType::numerics()
));
}
#[test]
fn test_clip_fn_signed() {
// eval with signed integers
let args: Vec<VectorRef> = vec![
Arc::new(Int32Vector::from_values(0..10)),
Arc::new(ConstantVector::new(
Arc::new(Int8Vector::from_vec(vec![3])),
10,
)),
Arc::new(ConstantVector::new(
Arc::new(Int16Vector::from_vec(vec![6])),
10,
)),
];
let vector = ClipFunction
.eval(FunctionContext::default(), &args)
.unwrap();
assert_eq!(10, vector.len());
// clip([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 3, 6) = [3, 3, 3, 3, 4, 5, 6, 6, 6, 6]
for i in 0..10 {
if i <= 3 {
assert!(matches!(vector.get(i), Value::Int64(v) if v == 3));
} else if i <= 6 {
assert!(matches!(vector.get(i), Value::Int64(v) if v == (i as i64)));
} else {
assert!(matches!(vector.get(i), Value::Int64(v) if v == 6));
}
}
}
#[test]
fn test_clip_fn_unsigned() {
// eval with unsigned integers
let args: Vec<VectorRef> = vec![
Arc::new(UInt8Vector::from_values(0..10)),
Arc::new(ConstantVector::new(
Arc::new(UInt32Vector::from_vec(vec![3])),
10,
)),
Arc::new(ConstantVector::new(
Arc::new(UInt16Vector::from_vec(vec![6])),
10,
)),
];
let vector = ClipFunction
.eval(FunctionContext::default(), &args)
.unwrap();
assert_eq!(10, vector.len());
// clip([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 3, 6) = [3, 3, 3, 3, 4, 5, 6, 6, 6, 6]
for i in 0..10 {
if i <= 3 {
assert!(matches!(vector.get(i), Value::UInt64(v) if v == 3));
} else if i <= 6 {
assert!(matches!(vector.get(i), Value::UInt64(v) if v == (i as u64)));
} else {
assert!(matches!(vector.get(i), Value::UInt64(v) if v == 6));
}
}
}
#[test]
fn test_clip_fn_float() {
// eval with floats
let args: Vec<VectorRef> = vec![
Arc::new(Int8Vector::from_values(0..10)),
Arc::new(ConstantVector::new(
Arc::new(UInt32Vector::from_vec(vec![3])),
10,
)),
Arc::new(ConstantVector::new(
Arc::new(Float32Vector::from_vec(vec![6f32])),
10,
)),
];
let vector = ClipFunction
.eval(FunctionContext::default(), &args)
.unwrap();
assert_eq!(10, vector.len());
// clip([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 3, 6) = [3, 3, 3, 3, 4, 5, 6, 6, 6, 6]
for i in 0..10 {
if i <= 3 {
assert!(matches!(vector.get(i), Value::Float64(v) if v == 3.0));
} else if i <= 6 {
assert!(matches!(vector.get(i), Value::Float64(v) if v == (i as f64)));
} else {
assert!(matches!(vector.get(i), Value::Float64(v) if v == 6.0));
}
}
}
}

Some files were not shown because too many files have changed in this diff.