Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-06 21:32:58 +00:00)

Compare commits: 87 commits, v0.1.0-alp ... v0.1.0-alp
Commits (SHA1):
0b3f955ca7, 4b58a8a18d, bd377ef329, df751c38b4, f6e871708a, 819c990a89,
a8b4e8d933, 710e2ed133, 81eab74b90, 8f67d8ca93, 4cc3ac37d5, b48c851b96,
fdd17c6eeb, 51641db39e, 98ef74bff4, f42acc90c2, 2df8143ad5, fb2e0c7cf3,
390e9095f6, bcd44b90c1, c6f2db8ae0, e17d5a1c41, 23092a5208, 4bbad6ab1e,
6833b405d9, aaaf24143d, 9161796dfa, 68b231987c, 6e9964ac97, 6afd79cab8,
4e88a01638, af1f8d6101, a9c8584c98, 7787cfdd42, 2f39a77137, 16f86a9d77,
5ec1a7027b, ddbc97befb, a8c2b35ec6, 04afee216e, 5533040be7, 34fdba77df,
cd0d58cb24, 8b869642b8, a33d1e9863, dfe7bfb07f, 5d1f231004, 40eec85cf7,
e17d564bf0, 301656d568, a19dee1dc0, 75b8afe043, e2904b99ac, de0b8aa0a0,
63e396e9e9, 4d8276790b, 374acc8830, 8491f65093, 5e6f340dd9, 7b98718cd9,
0f7e5a2fb2, 9ad6c45913, 7fe417e740, c1a9f84c7f, be897efd01, c06e04afbb,
e77a7f253c, 7d6f4cd88b, 83ac6598b6, 4c925e0079, c6128ec0a4, 7c34b009ec,
70edd4d55b, 6beea73590, c0d3533d10, 9989a8c192, 19dd8b1246, 1e9918ddf9,
4ce62f850b, 83d57f9111, 803b7f0633, 37ca5ba380, c1d32bdf2b, 83509f31f4,
926022e14c, 2f2609d8c6, ecadbc1435
@@ -1,2 +1,5 @@
 [target.aarch64-unknown-linux-gnu]
 linker = "aarch64-linux-gnu-gcc"
+
+[alias]
+sqlness = "run --bin sqlness-runner --"
.github/workflows/develop.yml (vendored, 10 lines changed)
@@ -24,15 +24,15 @@ on:
 name: CI

 env:
-  RUST_TOOLCHAIN: nightly-2022-12-20
+  RUST_TOOLCHAIN: nightly-2023-02-14

 jobs:
   typos:
     name: Spell Check with Typos
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
-      - uses: crate-ci/typos@v1.0.4
+      - uses: actions/checkout@v3
+      - uses: crate-ci/typos@v1.13.10

   check:
     name: Check
@@ -138,7 +138,7 @@ jobs:
           sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
           nohup etcd >/tmp/etcd.log 2>&1 &
       - name: Run sqlness
-        run: cargo run --bin sqlness-runner && ls /tmp
+        run: cargo sqlness && ls /tmp
      - name: Upload sqlness logs
        uses: actions/upload-artifact@v3
        with:
@@ -223,5 +223,5 @@ jobs:
           token: ${{ secrets.CODECOV_TOKEN }}
           files: ./lcov.info
           flags: rust
-          fail_ci_if_error: true
+          fail_ci_if_error: false
           verbose: true
.github/workflows/doc-issue.yml (vendored, 16 lines changed)
@@ -1,4 +1,4 @@
-name: Create Issue in docs repo on doc related changes
+name: Create Issue in downstream repos

 on:
   issues:
@@ -23,3 +23,17 @@ jobs:
         body: |
           A document change request is generated from
           ${{ github.event.issue.html_url || github.event.pull_request.html_url }}
+  cloud_issue:
+    if: github.event.label.name == 'cloud followup required'
+    runs-on: ubuntu-latest
+    steps:
+      - name: create an issue in cloud repo
+        uses: dacbd/create-issue-action@main
+        with:
+          owner: GreptimeTeam
+          repo: greptimedb-cloud
+          token: ${{ secrets.DOCS_REPO_TOKEN }}
+          title: Followup changes in ${{ github.event.issue.title || github.event.pull_request.title }}
+          body: |
+            A followup request is generated from
+            ${{ github.event.issue.html_url || github.event.pull_request.html_url }}
.github/workflows/license.yaml (vendored, 2 lines changed)
@@ -13,4 +13,4 @@ jobs:
     steps:
       - uses: actions/checkout@v2
       - name: Check License Header
-        uses: apache/skywalking-eyes/header@main
+        uses: apache/skywalking-eyes/header@df70871af1a8109c9a5b1dc824faaf65246c5236
.github/workflows/release.yml (vendored, 5 lines changed)
@@ -10,7 +10,7 @@ on:
 name: Release

 env:
-  RUST_TOOLCHAIN: nightly-2022-12-20
+  RUST_TOOLCHAIN: nightly-2023-02-14

   # FIXME(zyy17): Would be better to use `gh release list -L 1 | cut -f 3` to get the latest release version tag, but for a long time, we will stay at 'v0.1.0-alpha-*'.
   SCHEDULED_BUILD_VERSION_PREFIX: v0.1.0-alpha
@@ -40,6 +40,7 @@ jobs:
           os: macos-latest
           file: greptime-darwin-amd64
     runs-on: ${{ matrix.os }}
+    if: github.repository == 'GreptimeTeam/greptimedb'
     steps:
       - name: Checkout sources
         uses: actions/checkout@v3
@@ -132,6 +133,7 @@ jobs:
     name: Release artifacts
     needs: [build]
     runs-on: ubuntu-latest
+    if: github.repository == 'GreptimeTeam/greptimedb'
     steps:
       - name: Checkout sources
         uses: actions/checkout@v3
@@ -174,6 +176,7 @@ jobs:
     name: Build docker image
     needs: [build]
     runs-on: ubuntu-latest
+    if: github.repository == 'GreptimeTeam/greptimedb'
     steps:
       - name: Checkout sources
         uses: actions/checkout@v3
Cargo.lock (generated, 547 lines changed): file diff suppressed because it is too large.
@@ -37,6 +37,7 @@ members = [
     "src/storage",
     "src/store-api",
     "src/table",
+    "src/table-procedure",
     "tests-integration",
     "tests/runner",
 ]
@@ -48,11 +49,13 @@ license = "Apache-2.0"

 [workspace.dependencies]
 arrow = "29.0"
+arrow-array = "29.0"
 arrow-flight = "29.0"
 arrow-schema = { version = "29.0", features = ["serde"] }
 async-stream = "0.3"
 async-trait = "0.1"
-# TODO(LFC): Use released Datafusion when it officially dpendent on Arrow 29.0
+chrono = { version = "0.4", features = ["serde"] }
+# TODO(LFC): Use released Datafusion when it officially dependent on Arrow 29.0
 datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
 datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
 datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
@@ -65,10 +68,12 @@ parquet = "29.0"
 paste = "1.0"
 prost = "0.11"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 snafu = { version = "0.7", features = ["backtraces"] }
 sqlparser = "0.28"
 tokio = { version = "1.24.2", features = ["full"] }
-tonic = "0.8"
+tokio-util = "0.7"
+tonic = { version = "0.8", features = ["tls"] }
 uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }

 [profile.release]
Makefile (6 lines changed)
@@ -19,6 +19,10 @@ clean: ## Clean the project.
 fmt: ## Format all the Rust code.
	cargo fmt --all

+.PHONY: fmt-toml
+fmt-toml: ## Format all TOML files.
+	taplo format --check --option "indent_string= "
+
 .PHONY: docker-image
 docker-image: ## Build docker image.
	docker build --network host -f docker/Dockerfile -t ${IMAGE_REGISTRY}:${IMAGE_TAG} .
@@ -35,7 +39,7 @@ integration-test: ## Run integation test.

 .PHONY: sqlness-test
 sqlness-test: ## Run sqlness test.
-	cargo run --bin sqlness-runner
+	cargo sqlness

 .PHONY: check
 check: ## Cargo check all the targets.
@@ -27,7 +27,7 @@ use arrow::record_batch::RecordBatch;
 use clap::Parser;
 use client::api::v1::column::Values;
 use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, TableId};
-use client::{Client, Database};
+use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
 use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
 use tokio::task::JoinSet;
@@ -422,7 +422,7 @@ fn main() {
         .unwrap()
         .block_on(async {
             let client = Client::with_urls(vec![&args.endpoint]);
-            let db = Database::with_client(client);
+            let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);

             if !args.skip_write {
                 do_write(&args, &db).await;
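The second hunk replaces `Database::with_client` with `Database::new`, which binds the handle to an explicit catalog and schema. A minimal sketch of the new call shape, assuming only what the diff itself shows (the endpoint value is illustrative):

use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};

// Construct a database handle bound to the default catalog and schema,
// mirroring the change above; previously `with_client` implied them.
fn connect(endpoint: &str) -> Database {
    let client = Client::with_urls(vec![endpoint]);
    Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client)
}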
@@ -8,3 +8,5 @@ coverage:
 ignore:
   - "**/error*.rs" # ignore all error.rs files
   - "tests/runner/*.rs" # ignore integration test runner
+comment: # this is a top-level key
+  layout: "diff"
@@ -19,8 +19,17 @@ sync_write = false
 type = 'File'
 data_dir = '/tmp/greptimedb/data/'

-[meta_client_opts]
+[meta_client_options]
 metasrv_addrs = ['127.0.0.1:3002']
 timeout_millis = 3000
 connect_timeout_millis = 5000
 tcp_nodelay = false
+
+[compaction]
+max_inflight_tasks = 4
+max_files_in_level0 = 16
+max_purge_tasks = 32
+
+[procedure.store]
+type = 'File'
+data_dir = '/tmp/greptimedb/procedure/'
@@ -5,7 +5,7 @@ datanode_rpc_addr = '127.0.0.1:3001'
 addr = '127.0.0.1:4000'
 timeout = "30s"

-[meta_client_opts]
+[meta_client_options]
 metasrv_addrs = ['127.0.0.1:3002']
 timeout_millis = 3000
 connect_timeout_millis = 5000
@@ -14,7 +14,6 @@ purge_threshold = '50GB'
 read_batch_size = 128
 sync_write = false

-
 [storage]
 type = 'File'
 data_dir = '/tmp/greptimedb/data/'
@@ -42,3 +41,7 @@ enable = true
 addr = '127.0.0.1:4003'
 runtime_size = 2
 check_pwd = false
+
+[procedure.store]
+type = 'File'
+data_dir = '/tmp/greptimedb/procedure/'
@@ -149,10 +149,10 @@ inputs:
   - title: 'Series Normalize: \noffset = 0'
     operator: prom
     inputs:
-      - title: 'Filter: \ntimetamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
+      - title: 'Filter: \ntimestamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
         operator: filter
         inputs:
-          - title: 'Table Scan: \ntable = request_duration, timetamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
+          - title: 'Table Scan: \ntable = request_duration, timestamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
             operator: scan -->

 
@@ -140,8 +140,6 @@ Rollback is complicated to implement so some procedures might not support rollback.
 ## Locking
 The `ProcedureManager` can provide a locking mechanism that gives a procedure read/write access to a database object such as a table so other procedures are unable to modify the same table while the current one is executing.

-Sub-procedures always inherit their parents' locks. The `ProcedureManager` only acquires locks for a procedure if its parent doesn't hold the lock.
-
 # Drawbacks
 The `Procedure` framework introduces additional complexity and overhead to our database.
 - To execute a `Procedure`, we need to write to the `ProcedureStore` multiple times, which may slow down the server
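To make the retained locking paragraph concrete, here is a minimal sketch of per-table read/write locking, using illustrative shapes (`LockMap`, string table keys) rather than the RFC's real API:

use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use tokio::sync::RwLock;

// Maps a database object (here, a table name) to a shared RwLock. A procedure
// that alters the table holds the write half for its whole execution, so other
// procedures cannot modify the same table concurrently, while readers share it.
#[derive(Default)]
struct LockMap {
    locks: Mutex<HashMap<String, Arc<RwLock<()>>>>,
}

impl LockMap {
    fn lock_for(&self, table: &str) -> Arc<RwLock<()>> {
        self.locks
            .lock()
            .unwrap()
            .entry(table.to_string())
            .or_default()
            .clone()
    }
}

// Usage sketch: let guard = lock_map.lock_for("t"); let _w = guard.write().await;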
@@ -1,2 +1,2 @@
 [toolchain]
-channel = "nightly-2022-12-20"
+channel = "nightly-2023-02-14"
@@ -10,6 +10,7 @@ common-base = { path = "../common/base" }
 common-error = { path = "../common/error" }
 common-time = { path = "../common/time" }
 datatypes = { path = "../datatypes" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "1599ae2a0d1d8f42ee23ed26e4ad7a7b34134c60" }
 prost.workspace = true
 snafu = { version = "0.7", features = ["backtraces"] }
 tonic.workspace = true
@@ -1,29 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

fn main() {
    tonic_build::configure()
        .compile(
            &[
                "greptime/v1/database.proto",
                "greptime/v1/meta/common.proto",
                "greptime/v1/meta/heartbeat.proto",
                "greptime/v1/meta/route.proto",
                "greptime/v1/meta/store.proto",
                "prometheus/remote/remote.proto",
            ],
            &["."],
        )
        .expect("compile proto");
}
@@ -1,85 +0,0 @@
syntax = "proto3";

package greptime.v1;

message Column {
  string column_name = 1;

  enum SemanticType {
    TAG = 0;
    FIELD = 1;
    TIMESTAMP = 2;
  }
  SemanticType semantic_type = 2;

  message Values {
    repeated int32 i8_values = 1;
    repeated int32 i16_values = 2;
    repeated int32 i32_values = 3;
    repeated int64 i64_values = 4;

    repeated uint32 u8_values = 5;
    repeated uint32 u16_values = 6;
    repeated uint32 u32_values = 7;
    repeated uint64 u64_values = 8;

    repeated float f32_values = 9;
    repeated double f64_values = 10;

    repeated bool bool_values = 11;
    repeated bytes binary_values = 12;
    repeated string string_values = 13;

    repeated int32 date_values = 14;
    repeated int64 datetime_values = 15;
    repeated int64 ts_second_values = 16;
    repeated int64 ts_millisecond_values = 17;
    repeated int64 ts_microsecond_values = 18;
    repeated int64 ts_nanosecond_values = 19;
  }
  // The array of non-null values in this column.
  //
  // For example: suppose there is a column "foo" that contains some int32 values (1, 2, 3, 4, 5, null, 7, 8, 9, null);
  // column:
  //   column_name: foo
  //   semantic_type: Tag
  //   values: 1, 2, 3, 4, 5, 7, 8, 9
  //   null_masks: 00100000 00000010
  Values values = 3;

  // Mask maps the positions of null values.
  // If a bit in null_mask is 1, it indicates that the column value at that position is null.
  bytes null_mask = 4;

  // Helpful in creating vector from column.
  ColumnDataType datatype = 5;
}

message ColumnDef {
  string name = 1;
  ColumnDataType datatype = 2;
  bool is_nullable = 3;
  bytes default_constraint = 4;
}

enum ColumnDataType {
  BOOLEAN = 0;
  INT8 = 1;
  INT16 = 2;
  INT32 = 3;
  INT64 = 4;
  UINT8 = 5;
  UINT16 = 6;
  UINT32 = 7;
  UINT64 = 8;
  FLOAT32 = 9;
  FLOAT64 = 10;
  BINARY = 11;
  STRING = 12;
  DATE = 13;
  DATETIME = 14;
  TIMESTAMP_SECOND = 15;
  TIMESTAMP_MILLISECOND = 16;
  TIMESTAMP_MICROSECOND = 17;
  TIMESTAMP_NANOSECOND = 18;
}
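The null-mask encoding documented in the deleted column.proto deserves a worked illustration. Bit i of null_mask (least-significant bit first within each byte, which is how the file's own example decodes) marks row i as null, and non-null rows are read from the dense values array in order. A hedged sketch, not the crate's actual decoder:

fn decode_i32_column(values: &[i32], null_mask: &[u8], row_count: usize) -> Vec<Option<i32>> {
    let mut out = Vec::with_capacity(row_count);
    let mut next = 0; // cursor into the dense, nulls-omitted values array
    for row in 0..row_count {
        let is_null = null_mask
            .get(row / 8)
            .copied()
            .map_or(false, |byte| byte & (1 << (row % 8)) != 0);
        if is_null {
            out.push(None);
        } else {
            out.push(Some(values[next]));
            next += 1;
        }
    }
    out
}

Feeding it the documented example, values [1, 2, 3, 4, 5, 7, 8, 9] with mask bytes [0b0010_0000, 0b0000_0010] over 10 rows, reproduces nulls at rows 5 and 9.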
@@ -1,52 +0,0 @@
syntax = "proto3";

package greptime.v1;

import "greptime/v1/ddl.proto";
import "greptime/v1/column.proto";

message RequestHeader {
  // The `catalog` that is selected to be used in this request.
  string catalog = 1;
  // The `schema` that is selected to be used in this request.
  string schema = 2;
}

message GreptimeRequest {
  RequestHeader header = 1;
  oneof request {
    InsertRequest insert = 2;
    QueryRequest query = 3;
    DdlRequest ddl = 4;
  }
}

message QueryRequest {
  oneof query {
    string sql = 1;
    bytes logical_plan = 2;
  }
}

message InsertRequest {
  string table_name = 1;

  // Data is represented here.
  repeated Column columns = 3;

  // The row_count of all columns, which include null and non-null values.
  //
  // Note: the row_count of all columns in a InsertRequest must be same.
  uint32 row_count = 4;

  // The region number of current insert request.
  uint32 region_number = 5;
}

message AffectedRows {
  uint32 value = 1;
}

message FlightMetadata {
  AffectedRows affected_rows = 1;
}
@@ -1,79 +0,0 @@
syntax = "proto3";

package greptime.v1;

import "greptime/v1/column.proto";

// "Data Definition Language" requests, that create, modify or delete the database structures but not the data.
// `DdlRequest` could carry more information than plain SQL, for example, the "table_id" in `CreateTableExpr`.
// So create a new DDL expr if you need it.
message DdlRequest {
  oneof expr {
    CreateDatabaseExpr create_database = 1;
    CreateTableExpr create_table = 2;
    AlterExpr alter = 3;
    DropTableExpr drop_table = 4;
  }
}

message CreateTableExpr {
  string catalog_name = 1;
  string schema_name = 2;
  string table_name = 3;
  string desc = 4;
  repeated ColumnDef column_defs = 5;
  string time_index = 6;
  repeated string primary_keys = 7;
  bool create_if_not_exists = 8;
  map<string, string> table_options = 9;
  TableId table_id = 10;
  repeated uint32 region_ids = 11;
}

message AlterExpr {
  string catalog_name = 1;
  string schema_name = 2;
  string table_name = 3;
  oneof kind {
    AddColumns add_columns = 4;
    DropColumns drop_columns = 5;
    RenameTable rename_table = 6;
  }
}

message DropTableExpr {
  string catalog_name = 1;
  string schema_name = 2;
  string table_name = 3;
}

message CreateDatabaseExpr {
  //TODO(hl): maybe rename to schema_name?
  string database_name = 1;
  bool create_if_not_exists = 2;
}

message AddColumns {
  repeated AddColumn add_columns = 1;
}

message DropColumns {
  repeated DropColumn drop_columns = 1;
}

message RenameTable {
  string new_table_name = 1;
}

message AddColumn {
  ColumnDef column_def = 1;
  bool is_key = 2;
}

message DropColumn {
  string name = 1;
}

message TableId {
  uint32 id = 1;
}
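The header comment notes that a `DdlRequest` can carry more than plain SQL, such as an explicit table id. A hedged sketch of building one with prost-generated Rust types; the module path `ddl_request::Expr` and the exact field shapes are assumptions based on prost's usual codegen, not verified against the crate:

use api::v1::{ddl_request::Expr, CreateTableExpr, DdlRequest, TableId};

fn create_table_request(table: &str) -> DdlRequest {
    DdlRequest {
        expr: Some(Expr::CreateTable(CreateTableExpr {
            catalog_name: "greptime".to_string(),
            schema_name: "public".to_string(),
            table_name: table.to_string(),
            create_if_not_exists: true,
            // Carrying the table id is the "more than plain SQL" part.
            table_id: Some(TableId { id: 1024 }),
            ..Default::default()
        })),
    }
}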
@@ -1,48 +0,0 @@
syntax = "proto3";

package greptime.v1.meta;

message RequestHeader {
  uint64 protocol_version = 1;
  // cluster_id is the ID of the cluster which be sent to.
  uint64 cluster_id = 2;
  // member_id is the ID of the sender server.
  uint64 member_id = 3;
}

message ResponseHeader {
  uint64 protocol_version = 1;
  // cluster_id is the ID of the cluster which sent the response.
  uint64 cluster_id = 2;
  Error error = 3;
}

message Error {
  int32 code = 1;
  string err_msg = 2;
}

message Peer {
  uint64 id = 1;
  string addr = 2;
}

message TableName {
  string catalog_name = 1;
  string schema_name = 2;
  string table_name = 3;
}

message TimeInterval {
  // The unix timestamp in millis of the start of this period.
  uint64 start_timestamp_millis = 1;
  // The unix timestamp in millis of the end of this period.
  uint64 end_timestamp_millis = 2;
}

message KeyValue {
  // key is the key in bytes. An empty key is not allowed.
  bytes key = 1;
  // value is the value held by the key, in bytes.
  bytes value = 2;
}
@@ -1,92 +0,0 @@
syntax = "proto3";

package greptime.v1.meta;

import "greptime/v1/meta/common.proto";

service Heartbeat {
  // Heartbeat, there may be many contents of the heartbeat, such as:
  // 1. Metadata to be registered to meta server and discoverable by other nodes.
  // 2. Some performance metrics, such as Load, CPU usage, etc.
  // 3. The number of computing tasks being executed.
  rpc Heartbeat(stream HeartbeatRequest) returns (stream HeartbeatResponse) {}

  // Ask leader's endpoint.
  rpc AskLeader(AskLeaderRequest) returns (AskLeaderResponse) {}
}

message HeartbeatRequest {
  RequestHeader header = 1;

  // Self peer
  Peer peer = 2;
  // Leader node
  bool is_leader = 3;
  // Actually reported time interval
  TimeInterval report_interval = 4;
  // Node stat
  NodeStat node_stat = 5;
  // Region stats on this node
  repeated RegionStat region_stats = 6;
  // Follower nodes and stats, empty on follower nodes
  repeated ReplicaStat replica_stats = 7;
}

message NodeStat {
  // The read capacity units during this period
  int64 rcus = 1;
  // The write capacity units during this period
  int64 wcus = 2;
  // How many tables on this node
  int64 table_num = 3;
  // How many regions on this node
  int64 region_num = 4;

  double cpu_usage = 5;
  double load = 6;
  // Read disk IO on this node
  double read_io_rate = 7;
  // Write disk IO on this node
  double write_io_rate = 8;

  // Others
  map<string, string> attrs = 100;
}

message RegionStat {
  uint64 region_id = 1;
  TableName table_name = 2;
  // The read capacity units during this period
  int64 rcus = 3;
  // The write capacity units during this period
  int64 wcus = 4;
  // Approximate bytes of this region
  int64 approximate_bytes = 5;
  // Approximate number of rows in this region
  int64 approximate_rows = 6;

  // Others
  map<string, string> attrs = 100;
}

message ReplicaStat {
  Peer peer = 1;
  bool in_sync = 2;
  bool is_learner = 3;
}

message HeartbeatResponse {
  ResponseHeader header = 1;

  repeated bytes payload = 2;
}

message AskLeaderRequest {
  RequestHeader header = 1;
}

message AskLeaderResponse {
  ResponseHeader header = 1;

  Peer leader = 2;
}
@@ -1,98 +0,0 @@
syntax = "proto3";

package greptime.v1.meta;

import "greptime/v1/meta/common.proto";

service Router {
  rpc Create(CreateRequest) returns (RouteResponse) {}

  // Fetch routing information for tables. The smallest unit is the complete
  // routing information(all regions) of a table.
  //
  // ```text
  // table_1
  //   table_name
  //   table_schema
  //   regions
  //     region_1
  //       leader_peer
  //       follower_peer_1, follower_peer_2
  //     region_2
  //       leader_peer
  //       follower_peer_1, follower_peer_2, follower_peer_3
  //     region_xxx
  // table_2
  // ...
  // ```
  //
  rpc Route(RouteRequest) returns (RouteResponse) {}

  rpc Delete(DeleteRequest) returns (RouteResponse) {}
}

message CreateRequest {
  RequestHeader header = 1;

  TableName table_name = 2;
  repeated Partition partitions = 3;
}

message RouteRequest {
  RequestHeader header = 1;

  repeated TableName table_names = 2;
}

message DeleteRequest {
  RequestHeader header = 1;

  TableName table_name = 2;
}

message RouteResponse {
  ResponseHeader header = 1;

  repeated Peer peers = 2;
  repeated TableRoute table_routes = 3;
}

message TableRoute {
  Table table = 1;
  repeated RegionRoute region_routes = 2;
}

message RegionRoute {
  Region region = 1;
  // single leader node for write task
  uint64 leader_peer_index = 2;
  // multiple follower nodes for read task
  repeated uint64 follower_peer_indexes = 3;
}

message Table {
  uint64 id = 1;
  TableName table_name = 2;
  bytes table_schema = 3;
}

message Region {
  // TODO(LFC): Maybe use message RegionNumber?
  uint64 id = 1;
  string name = 2;
  Partition partition = 3;

  map<string, string> attrs = 100;
}

// PARTITION `region_name` VALUES LESS THAN (value_list)
message Partition {
  repeated bytes column_list = 1;
  repeated bytes value_list = 2;
}

// This message is only for saving into store.
message TableRouteValue {
  repeated Peer peers = 1;
  TableRoute table_route = 2;
}
@@ -1,159 +0,0 @@
syntax = "proto3";

package greptime.v1.meta;

import "greptime/v1/meta/common.proto";

service Store {
  // Range gets the keys in the range from the key-value store.
  rpc Range(RangeRequest) returns (RangeResponse);

  // Put puts the given key into the key-value store.
  rpc Put(PutRequest) returns (PutResponse);

  // BatchPut atomically puts the given keys into the key-value store.
  rpc BatchPut(BatchPutRequest) returns (BatchPutResponse);

  // CompareAndPut atomically puts the value to the given updated
  // value if the current value == the expected value.
  rpc CompareAndPut(CompareAndPutRequest) returns (CompareAndPutResponse);

  // DeleteRange deletes the given range from the key-value store.
  rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse);

  // MoveValue atomically renames the key to the given updated key.
  rpc MoveValue(MoveValueRequest) returns (MoveValueResponse);
}

message RangeRequest {
  RequestHeader header = 1;

  // key is the first key for the range, If range_end is not given, the
  // request only looks up key.
  bytes key = 2;
  // range_end is the upper bound on the requested range [key, range_end).
  // If range_end is '\0', the range is all keys >= key.
  // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
  // then the range request gets all keys prefixed with key.
  // If both key and range_end are '\0', then the range request returns all
  // keys.
  bytes range_end = 3;
  // limit is a limit on the number of keys returned for the request. When
  // limit is set to 0, it is treated as no limit.
  int64 limit = 4;
  // keys_only when set returns only the keys and not the values.
  bool keys_only = 5;
}

message RangeResponse {
  ResponseHeader header = 1;

  // kvs is the list of key-value pairs matched by the range request.
  repeated KeyValue kvs = 2;
  // more indicates if there are more keys to return in the requested range.
  bool more = 3;
}

message PutRequest {
  RequestHeader header = 1;

  // key is the key, in bytes, to put into the key-value store.
  bytes key = 2;
  // value is the value, in bytes, to associate with the key in the
  // key-value store.
  bytes value = 3;
  // If prev_kv is set, gets the previous key-value pair before changing it.
  // The previous key-value pair will be returned in the put response.
  bool prev_kv = 4;
}

message PutResponse {
  ResponseHeader header = 1;

  // If prev_kv is set in the request, the previous key-value pair will be
  // returned.
  KeyValue prev_kv = 2;
}

message BatchPutRequest {
  RequestHeader header = 1;

  repeated KeyValue kvs = 2;
  // If prev_kv is set, gets the previous key-value pairs before changing it.
  // The previous key-value pairs will be returned in the batch put response.
  bool prev_kv = 3;
}

message BatchPutResponse {
  ResponseHeader header = 1;

  // If prev_kv is set in the request, the previous key-value pairs will be
  // returned.
  repeated KeyValue prev_kvs = 2;
}

message CompareAndPutRequest {
  RequestHeader header = 1;

  // key is the key, in bytes, to put into the key-value store.
  bytes key = 2;
  // expect is the previous value, in bytes
  bytes expect = 3;
  // value is the value, in bytes, to associate with the key in the
  // key-value store.
  bytes value = 4;
}

message CompareAndPutResponse {
  ResponseHeader header = 1;

  bool success = 2;
  KeyValue prev_kv = 3;
}

message DeleteRangeRequest {
  RequestHeader header = 1;

  // key is the first key to delete in the range.
  bytes key = 2;
  // range_end is the key following the last key to delete for the range
  // [key, range_end).
  // If range_end is not given, the range is defined to contain only the key
  // argument.
  // If range_end is one bit larger than the given key, then the range is all
  // the keys with the prefix (the given key).
  // If range_end is '\0', the range is all keys greater than or equal to the
  // key argument.
  bytes range_end = 3;
  // If prev_kv is set, gets the previous key-value pairs before deleting it.
  // The previous key-value pairs will be returned in the delete response.
  bool prev_kv = 4;
}

message DeleteRangeResponse {
  ResponseHeader header = 1;

  // deleted is the number of keys deleted by the delete range request.
  int64 deleted = 2;
  // If prev_kv is set in the request, the previous key-value pairs will be
  // returned.
  repeated KeyValue prev_kvs = 3;
}

message MoveValueRequest {
  RequestHeader header = 1;

  // If from_key dose not exist, return the value of to_key (if it exists).
  // If from_key exists, move the value of from_key to to_key (i.e. rename),
  // and return the value.
  bytes from_key = 2;
  bytes to_key = 3;
}

message MoveValueResponse {
  ResponseHeader header = 1;

  // If from_key dose not exist, return the value of to_key (if it exists).
  // If from_key exists, return the value of from_key.
  KeyValue kv = 2;
}
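The range_end rules in RangeRequest and DeleteRangeRequest above follow the usual etcd-style convention: '\0' means unbounded, and "key plus one" turns a scan into a prefix scan. A small sketch of computing the prefix end key under exactly those rules (an illustration, not part of the repository):

fn prefix_range_end(key: &[u8]) -> Vec<u8> {
    let mut end = key.to_vec();
    // Increment the last byte that is not 0xff and truncate everything after it.
    while let Some(last) = end.last().copied() {
        if last < 0xff {
            *end.last_mut().unwrap() = last + 1;
            return end;
        }
        end.pop();
    }
    vec![0] // every byte was 0xff: fall back to '\0', i.e. all keys >= key
}

Checking it against the comments: prefix_range_end(b"aa") yields b"ab", and prefix_range_end(b"a\xff") yields b"b".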
@@ -1,85 +0,0 @@
// Copyright 2016 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";
package prometheus;

option go_package = "prompb";

import "prometheus/remote/types.proto";

message WriteRequest {
  repeated prometheus.TimeSeries timeseries = 1;
  // Cortex uses this field to determine the source of the write request.
  // We reserve it to avoid any compatibility issues.
  reserved 2;
  repeated prometheus.MetricMetadata metadata = 3;
}

// ReadRequest represents a remote read request.
message ReadRequest {
  repeated Query queries = 1;

  enum ResponseType {
    // Server will return a single ReadResponse message with matched series that includes list of raw samples.
    // It's recommended to use streamed response types instead.
    //
    // Response headers:
    // Content-Type: "application/x-protobuf"
    // Content-Encoding: "snappy"
    SAMPLES = 0;
    // Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series.
    // Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum.
    //
    // Response headers:
    // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
    // Content-Encoding: ""
    STREAMED_XOR_CHUNKS = 1;
  }

  // accepted_response_types allows negotiating the content type of the response.
  //
  // Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
  // implemented by server, error is returned.
  // For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
  repeated ResponseType accepted_response_types = 2;
}

// ReadResponse is a response when response_type equals SAMPLES.
message ReadResponse {
  // In same order as the request's queries.
  repeated QueryResult results = 1;
}

message Query {
  int64 start_timestamp_ms = 1;
  int64 end_timestamp_ms = 2;
  repeated prometheus.LabelMatcher matchers = 3;
  prometheus.ReadHints hints = 4;
}

message QueryResult {
  // Samples within a time series must be ordered by time.
  repeated prometheus.TimeSeries timeseries = 1;
}

// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS.
// We strictly stream full series after series, optionally split by time. This means that a single frame can contain
// partition of the single series, but once a new series is started to be streamed it means that no more chunks will
// be sent for previous one. Series are returned sorted in the same way TSDB block are internally.
message ChunkedReadResponse {
  repeated prometheus.ChunkedSeries chunked_series = 1;

  // query_index represents an index of the query from ReadRequest.queries these chunks relates to.
  int64 query_index = 2;
}
@@ -1,117 +0,0 @@
// Copyright 2017 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";
package prometheus;

option go_package = "prompb";

message MetricMetadata {
  enum MetricType {
    UNKNOWN = 0;
    COUNTER = 1;
    GAUGE = 2;
    HISTOGRAM = 3;
    GAUGEHISTOGRAM = 4;
    SUMMARY = 5;
    INFO = 6;
    STATESET = 7;
  }

  // Represents the metric type, these match the set from Prometheus.
  // Refer to model/textparse/interface.go for details.
  MetricType type = 1;
  string metric_family_name = 2;
  string help = 4;
  string unit = 5;
}

message Sample {
  double value = 1;
  // timestamp is in ms format, see model/timestamp/timestamp.go for
  // conversion from time.Time to Prometheus timestamp.
  int64 timestamp = 2;
}

message Exemplar {
  // Optional, can be empty.
  repeated Label labels = 1;
  double value = 2;
  // timestamp is in ms format, see model/timestamp/timestamp.go for
  // conversion from time.Time to Prometheus timestamp.
  int64 timestamp = 3;
}

// TimeSeries represents samples and labels for a single time series.
message TimeSeries {
  // For a timeseries to be valid, and for the samples and exemplars
  // to be ingested by the remote system properly, the labels field is required.
  repeated Label labels = 1;
  repeated Sample samples = 2;
  repeated Exemplar exemplars = 3;
}

message Label {
  string name = 1;
  string value = 2;
}

message Labels {
  repeated Label labels = 1;
}

// Matcher specifies a rule, which can match or set of labels or not.
message LabelMatcher {
  enum Type {
    EQ = 0;
    NEQ = 1;
    RE = 2;
    NRE = 3;
  }
  Type type = 1;
  string name = 2;
  string value = 3;
}

message ReadHints {
  int64 step_ms = 1;            // Query step size in milliseconds.
  string func = 2;              // String representation of surrounding function or aggregation.
  int64 start_ms = 3;           // Start time in milliseconds.
  int64 end_ms = 4;             // End time in milliseconds.
  repeated string grouping = 5; // List of label names used in aggregation.
  bool by = 6;                  // Indicate whether it is without or by.
  int64 range_ms = 7;           // Range vector selector range in milliseconds.
}

// Chunk represents a TSDB chunk.
// Time range [min, max] is inclusive.
message Chunk {
  int64 min_time_ms = 1;
  int64 max_time_ms = 2;

  // We require this to match chunkenc.Encoding.
  enum Encoding {
    UNKNOWN = 0;
    XOR = 1;
  }
  Encoding type = 3;
  bytes data = 4;
}

// ChunkedSeries represents single, encoded time series.
message ChunkedSeries {
  // Labels should be sorted.
  repeated Label labels = 1;
  // Chunks will be in start time order and may overlap.
  repeated Chunk chunks = 2;
}
@@ -97,7 +97,9 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
             TimestampType::Microsecond(_) => ColumnDataType::TimestampMicrosecond,
             TimestampType::Nanosecond(_) => ColumnDataType::TimestampNanosecond,
         },
-        ConcreteDataType::Null(_) | ConcreteDataType::List(_) => {
+        ConcreteDataType::Null(_)
+        | ConcreteDataType::List(_)
+        | ConcreteDataType::Dictionary(_) => {
             return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
         }
     });
@@ -105,125 +107,121 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
     }
 }

-impl Values {
-    pub fn with_capacity(datatype: ColumnDataType, capacity: usize) -> Self {
-        match datatype {
-            ColumnDataType::Boolean => Values {
-                bool_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::Int8 => Values {
-                i8_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::Int16 => Values {
-                i16_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::Int32 => Values {
-                i32_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::Int64 => Values {
-                i64_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::Uint8 => Values {
-                u8_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::Uint16 => Values {
-                u16_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::Uint32 => Values {
-                u32_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::Uint64 => Values {
-                u64_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::Float32 => Values {
-                f32_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::Float64 => Values {
-                f64_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::Binary => Values {
-                binary_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::String => Values {
-                string_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::Date => Values {
-                date_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::Datetime => Values {
-                datetime_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::TimestampSecond => Values {
-                ts_second_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::TimestampMillisecond => Values {
-                ts_millisecond_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::TimestampMicrosecond => Values {
-                ts_microsecond_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-            ColumnDataType::TimestampNanosecond => Values {
-                ts_nanosecond_values: Vec::with_capacity(capacity),
-                ..Default::default()
-            },
-        }
+pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values {
+    match datatype {
+        ColumnDataType::Boolean => Values {
+            bool_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::Int8 => Values {
+            i8_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::Int16 => Values {
+            i16_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::Int32 => Values {
+            i32_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::Int64 => Values {
+            i64_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::Uint8 => Values {
+            u8_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::Uint16 => Values {
+            u16_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::Uint32 => Values {
+            u32_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::Uint64 => Values {
+            u64_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::Float32 => Values {
+            f32_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::Float64 => Values {
+            f64_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::Binary => Values {
+            binary_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::String => Values {
+            string_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::Date => Values {
+            date_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::Datetime => Values {
+            datetime_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::TimestampSecond => Values {
+            ts_second_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::TimestampMillisecond => Values {
+            ts_millisecond_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::TimestampMicrosecond => Values {
+            ts_microsecond_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+        ColumnDataType::TimestampNanosecond => Values {
+            ts_nanosecond_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
+    }
+}

-impl Column {
-    // The type of vals must be same.
-    pub fn push_vals(&mut self, origin_count: usize, vector: VectorRef) {
-        let values = self.values.get_or_insert_with(Values::default);
-        let mut null_mask = BitVec::from_slice(&self.null_mask);
-        let len = vector.len();
-        null_mask.reserve_exact(origin_count + len);
-        null_mask.extend(BitVec::repeat(false, len));
+// The type of vals must be same.
+pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
+    let values = column.values.get_or_insert_with(Values::default);
+    let mut null_mask = BitVec::from_slice(&column.null_mask);
+    let len = vector.len();
+    null_mask.reserve_exact(origin_count + len);
+    null_mask.extend(BitVec::repeat(false, len));

-        (0..len).into_iter().for_each(|idx| match vector.get(idx) {
-            Value::Null => null_mask.set(idx + origin_count, true),
-            Value::Boolean(val) => values.bool_values.push(val),
-            Value::UInt8(val) => values.u8_values.push(val.into()),
-            Value::UInt16(val) => values.u16_values.push(val.into()),
-            Value::UInt32(val) => values.u32_values.push(val),
-            Value::UInt64(val) => values.u64_values.push(val),
-            Value::Int8(val) => values.i8_values.push(val.into()),
-            Value::Int16(val) => values.i16_values.push(val.into()),
-            Value::Int32(val) => values.i32_values.push(val),
-            Value::Int64(val) => values.i64_values.push(val),
-            Value::Float32(val) => values.f32_values.push(*val),
-            Value::Float64(val) => values.f64_values.push(*val),
-            Value::String(val) => values.string_values.push(val.as_utf8().to_string()),
-            Value::Binary(val) => values.binary_values.push(val.to_vec()),
-            Value::Date(val) => values.date_values.push(val.val()),
-            Value::DateTime(val) => values.datetime_values.push(val.val()),
-            Value::Timestamp(val) => match val.unit() {
-                TimeUnit::Second => values.ts_second_values.push(val.value()),
-                TimeUnit::Millisecond => values.ts_millisecond_values.push(val.value()),
-                TimeUnit::Microsecond => values.ts_microsecond_values.push(val.value()),
-                TimeUnit::Nanosecond => values.ts_nanosecond_values.push(val.value()),
-            },
-            Value::List(_) => unreachable!(),
-        });
-        self.null_mask = null_mask.into_vec();
-    }
+    (0..len).for_each(|idx| match vector.get(idx) {
+        Value::Null => null_mask.set(idx + origin_count, true),
+        Value::Boolean(val) => values.bool_values.push(val),
+        Value::UInt8(val) => values.u8_values.push(val.into()),
+        Value::UInt16(val) => values.u16_values.push(val.into()),
+        Value::UInt32(val) => values.u32_values.push(val),
+        Value::UInt64(val) => values.u64_values.push(val),
+        Value::Int8(val) => values.i8_values.push(val.into()),
+        Value::Int16(val) => values.i16_values.push(val.into()),
+        Value::Int32(val) => values.i32_values.push(val),
+        Value::Int64(val) => values.i64_values.push(val),
+        Value::Float32(val) => values.f32_values.push(*val),
+        Value::Float64(val) => values.f64_values.push(*val),
+        Value::String(val) => values.string_values.push(val.as_utf8().to_string()),
+        Value::Binary(val) => values.binary_values.push(val.to_vec()),
+        Value::Date(val) => values.date_values.push(val.val()),
+        Value::DateTime(val) => values.datetime_values.push(val.val()),
+        Value::Timestamp(val) => match val.unit() {
+            TimeUnit::Second => values.ts_second_values.push(val.value()),
+            TimeUnit::Millisecond => values.ts_millisecond_values.push(val.value()),
+            TimeUnit::Microsecond => values.ts_microsecond_values.push(val.value()),
+            TimeUnit::Nanosecond => values.ts_nanosecond_values.push(val.value()),
+        },
+        Value::List(_) => unreachable!(),
+    });
+    column.null_mask = null_mask.into_vec();
+}

 #[cfg(test)]
@@ -239,59 +237,59 @@ mod tests {

     #[test]
     fn test_values_with_capacity() {
-        let values = Values::with_capacity(ColumnDataType::Int8, 2);
+        let values = values_with_capacity(ColumnDataType::Int8, 2);
         let values = values.i8_values;
         assert_eq!(2, values.capacity());

-        let values = Values::with_capacity(ColumnDataType::Int32, 2);
+        let values = values_with_capacity(ColumnDataType::Int32, 2);
         let values = values.i32_values;
         assert_eq!(2, values.capacity());

-        let values = Values::with_capacity(ColumnDataType::Int64, 2);
+        let values = values_with_capacity(ColumnDataType::Int64, 2);
         let values = values.i64_values;
         assert_eq!(2, values.capacity());

-        let values = Values::with_capacity(ColumnDataType::Uint8, 2);
+        let values = values_with_capacity(ColumnDataType::Uint8, 2);
         let values = values.u8_values;
         assert_eq!(2, values.capacity());

-        let values = Values::with_capacity(ColumnDataType::Uint32, 2);
+        let values = values_with_capacity(ColumnDataType::Uint32, 2);
         let values = values.u32_values;
         assert_eq!(2, values.capacity());

-        let values = Values::with_capacity(ColumnDataType::Uint64, 2);
+        let values = values_with_capacity(ColumnDataType::Uint64, 2);
         let values = values.u64_values;
         assert_eq!(2, values.capacity());

-        let values = Values::with_capacity(ColumnDataType::Float32, 2);
+        let values = values_with_capacity(ColumnDataType::Float32, 2);
         let values = values.f32_values;
         assert_eq!(2, values.capacity());

-        let values = Values::with_capacity(ColumnDataType::Float64, 2);
+        let values = values_with_capacity(ColumnDataType::Float64, 2);
         let values = values.f64_values;
         assert_eq!(2, values.capacity());

-        let values = Values::with_capacity(ColumnDataType::Binary, 2);
+        let values = values_with_capacity(ColumnDataType::Binary, 2);
         let values = values.binary_values;
         assert_eq!(2, values.capacity());

-        let values = Values::with_capacity(ColumnDataType::Boolean, 2);
+        let values = values_with_capacity(ColumnDataType::Boolean, 2);
         let values = values.bool_values;
         assert_eq!(2, values.capacity());

-        let values = Values::with_capacity(ColumnDataType::String, 2);
+        let values = values_with_capacity(ColumnDataType::String, 2);
         let values = values.string_values;
         assert_eq!(2, values.capacity());

-        let values = Values::with_capacity(ColumnDataType::Date, 2);
+        let values = values_with_capacity(ColumnDataType::Date, 2);
         let values = values.date_values;
         assert_eq!(2, values.capacity());

-        let values = Values::with_capacity(ColumnDataType::Datetime, 2);
+        let values = values_with_capacity(ColumnDataType::Datetime, 2);
         let values = values.datetime_values;
         assert_eq!(2, values.capacity());

-        let values = Values::with_capacity(ColumnDataType::TimestampMillisecond, 2);
+        let values = values_with_capacity(ColumnDataType::TimestampMillisecond, 2);
         let values = values.ts_millisecond_values;
         assert_eq!(2, values.capacity());
     }
@@ -462,28 +460,28 @@ mod tests {
         };

         let vector = Arc::new(TimestampNanosecondVector::from_vec(vec![1, 2, 3]));
-        column.push_vals(3, vector);
+        push_vals(&mut column, 3, vector);
         assert_eq!(
             vec![1, 2, 3],
             column.values.as_ref().unwrap().ts_nanosecond_values
         );

         let vector = Arc::new(TimestampMillisecondVector::from_vec(vec![4, 5, 6]));
-        column.push_vals(3, vector);
+        push_vals(&mut column, 3, vector);
         assert_eq!(
             vec![4, 5, 6],
             column.values.as_ref().unwrap().ts_millisecond_values
         );

         let vector = Arc::new(TimestampMicrosecondVector::from_vec(vec![7, 8, 9]));
-        column.push_vals(3, vector);
+        push_vals(&mut column, 3, vector);
         assert_eq!(
             vec![7, 8, 9],
             column.values.as_ref().unwrap().ts_microsecond_values
         );

         let vector = Arc::new(TimestampSecondVector::from_vec(vec![10, 11, 12]));
-        column.push_vals(3, vector);
+        push_vals(&mut column, 3, vector);
         assert_eq!(
             vec![10, 11, 12],
             column.values.as_ref().unwrap().ts_second_values
@@ -507,7 +505,7 @@ mod tests {
         let row_count = 4;

         let vector = Arc::new(BooleanVector::from(vec![Some(true), None, Some(false)]));
-        column.push_vals(row_count, vector);
+        push_vals(&mut column, row_count, vector);
         // Some(false), None, Some(true), Some(true), Some(true), None, Some(false)
         let bool_values = column.values.unwrap().bool_values;
         assert_eq!(vec![false, true, true, true, false], bool_values);
@@ -14,8 +14,13 @@

 pub mod error;
 pub mod helper;
-pub mod prometheus;
 pub mod serde;
+
+pub mod prometheus {
+    pub mod remote {
+        pub use greptime_proto::prometheus::remote::*;
+    }
+}

 pub mod v1;

 pub use prost::DecodeError;
@@ -1,38 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub use prost::DecodeError;
use prost::Message;

use crate::v1::meta::TableRouteValue;

macro_rules! impl_convert_with_bytes {
    ($data_type: ty) => {
        impl From<$data_type> for Vec<u8> {
            fn from(entity: $data_type) -> Self {
                entity.encode_to_vec()
            }
        }

        impl TryFrom<&[u8]> for $data_type {
            type Error = DecodeError;

            fn try_from(value: &[u8]) -> Result<Self, Self::Error> {
                <$data_type>::decode(value.as_ref())
            }
        }
    };
}

impl_convert_with_bytes!(TableRouteValue);
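Before its removal, the macro above generated `From<TableRouteValue> for Vec<u8>` and `TryFrom<&[u8]> for TableRouteValue` on top of prost's `Message` encoding. A hedged sketch of the equivalent roundtrip using prost directly (the type path is taken from the deleted module):

use prost::{DecodeError, Message};

use crate::v1::meta::TableRouteValue;

fn roundtrip(route: TableRouteValue) -> Result<TableRouteValue, DecodeError> {
    let bytes = route.encode_to_vec(); // what the generated From impl did
    TableRouteValue::decode(bytes.as_slice()) // what the TryFrom impl did
}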
@@ -12,8 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#![allow(clippy::derive_partial_eq_without_eq)]
-tonic::include_proto!("greptime.v1");
-pub mod column_def;
-pub mod meta;
+mod column_def;
+pub mod meta {
+    pub use greptime_proto::v1::meta::*;
+}
+
+pub use greptime_proto::v1::*;
@@ -19,21 +19,24 @@ use crate::error::{self, Result};
use crate::helper::ColumnDataTypeWrapper;
use crate::v1::ColumnDef;

impl ColumnDef {
    pub fn try_as_column_schema(&self) -> Result<ColumnSchema> {
        let data_type = ColumnDataTypeWrapper::try_new(self.datatype)?;
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
    let data_type = ColumnDataTypeWrapper::try_new(column_def.datatype)?;

        let constraint = if self.default_constraint.is_empty() {
            None
        } else {
            Some(
                ColumnDefaultConstraint::try_from(self.default_constraint.as_slice())
                    .context(error::ConvertColumnDefaultConstraintSnafu { column: &self.name })?,
            )
        };
    let constraint = if column_def.default_constraint.is_empty() {
        None
    } else {
        Some(
            ColumnDefaultConstraint::try_from(column_def.default_constraint.as_slice()).context(
                error::ConvertColumnDefaultConstraintSnafu {
                    column: &column_def.name,
                },
            )?,
        )
    };

        ColumnSchema::new(&self.name, data_type.into(), self.is_nullable)
            .with_default_constraint(constraint)
            .context(error::InvalidColumnDefaultConstraintSnafu { column: &self.name })
    }
    ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
        .with_default_constraint(constraint)
        .context(error::InvalidColumnDefaultConstraintSnafu {
            column: &column_def.name,
        })
}

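The conversion is now a free function rather than an inherent method; a hedged sketch of a call site, inside a Result-returning function (field values are illustrative, and the enum variant name is an assumption):

// Sketch: convert a wire-level ColumnDef into a ColumnSchema.
let def = ColumnDef {
    name: "ts".to_string(),
    datatype: ColumnDataType::TimestampMillisecond as i32, // assumed variant name
    is_nullable: false,
    default_constraint: vec![], // empty means "no default constraint"
};
let column_schema = try_as_column_schema(&def)?;
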
@@ -1,209 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

tonic::include_proto!("greptime.v1.meta");

use std::collections::HashMap;
use std::hash::{Hash, Hasher};

pub const PROTOCOL_VERSION: u64 = 1;

#[derive(Default)]
pub struct PeerDict {
    peers: HashMap<Peer, usize>,
    index: usize,
}

impl PeerDict {
    pub fn get_or_insert(&mut self, peer: Peer) -> usize {
        let index = self.peers.entry(peer).or_insert_with(|| {
            let v = self.index;
            self.index += 1;
            v
        });

        *index
    }

    pub fn into_peers(self) -> Vec<Peer> {
        let mut array = vec![Peer::default(); self.index];
        for (p, i) in self.peers {
            array[i] = p;
        }
        array
    }
}

#[allow(clippy::derive_hash_xor_eq)]
impl Hash for Peer {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id.hash(state);
        self.addr.hash(state);
    }
}

impl Eq for Peer {}

impl RequestHeader {
    #[inline]
    pub fn new((cluster_id, member_id): (u64, u64)) -> Self {
        Self {
            protocol_version: PROTOCOL_VERSION,
            cluster_id,
            member_id,
        }
    }
}

impl ResponseHeader {
    #[inline]
    pub fn success(cluster_id: u64) -> Self {
        Self {
            protocol_version: PROTOCOL_VERSION,
            cluster_id,
            ..Default::default()
        }
    }

    #[inline]
    pub fn failed(cluster_id: u64, error: Error) -> Self {
        Self {
            protocol_version: PROTOCOL_VERSION,
            cluster_id,
            error: Some(error),
        }
    }

    #[inline]
    pub fn is_not_leader(&self) -> bool {
        if let Some(error) = &self.error {
            if error.code == ErrorCode::NotLeader as i32 {
                return true;
            }
        }
        false
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ErrorCode {
    NoActiveDatanodes = 1,
    NotLeader = 2,
}

impl Error {
    #[inline]
    pub fn no_active_datanodes() -> Self {
        Self {
            code: ErrorCode::NoActiveDatanodes as i32,
            err_msg: "No active datanodes".to_string(),
        }
    }

    #[inline]
    pub fn is_not_leader() -> Self {
        Self {
            code: ErrorCode::NotLeader as i32,
            err_msg: "Current server is not leader".to_string(),
        }
    }
}

impl HeartbeatResponse {
    #[inline]
    pub fn is_not_leader(&self) -> bool {
        if let Some(header) = &self.header {
            return header.is_not_leader();
        }
        false
    }
}

macro_rules! gen_set_header {
    ($req: ty) => {
        impl $req {
            #[inline]
            pub fn set_header(&mut self, (cluster_id, member_id): (u64, u64)) {
                self.header = Some(RequestHeader::new((cluster_id, member_id)));
            }
        }
    };
}

gen_set_header!(HeartbeatRequest);
gen_set_header!(RouteRequest);
gen_set_header!(CreateRequest);
gen_set_header!(RangeRequest);
gen_set_header!(DeleteRequest);
gen_set_header!(PutRequest);
gen_set_header!(BatchPutRequest);
gen_set_header!(CompareAndPutRequest);
gen_set_header!(DeleteRangeRequest);
gen_set_header!(MoveValueRequest);

#[cfg(test)]
mod tests {
    use std::vec;

    use super::*;

    #[test]
    fn test_peer_dict() {
        let mut dict = PeerDict::default();

        dict.get_or_insert(Peer {
            id: 1,
            addr: "111".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 2,
            addr: "222".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 1,
            addr: "111".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 1,
            addr: "111".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 1,
            addr: "111".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 1,
            addr: "111".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 2,
            addr: "222".to_string(),
        });

        assert_eq!(2, dict.index);
        assert_eq!(
            vec![
                Peer {
                    id: 1,
                    addr: "111".to_string(),
                },
                Peer {
                    id: 2,
                    addr: "222".to_string(),
                }
            ],
            dict.into_peers()
        );
    }
}

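For context, a minimal sketch of what the `gen_set_header!`-generated method did for each request type in the deleted module:

// Sketch: stamp the protocol version plus the caller's identity onto a request.
let mut req = HeartbeatRequest::default();
req.set_header((42, 7)); // (cluster_id, member_id)
assert!(req.header.is_some());
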
@@ -33,7 +33,7 @@ table = { path = "../table" }
tokio.workspace = true

[dev-dependencies]
chrono = "0.4"
chrono.workspace = true
log-store = { path = "../log-store" }
mito = { path = "../mito", features = ["test"] }
object-store = { path = "../object-store" }

@@ -19,7 +19,6 @@ use common_error::ext::{BoxedError, ErrorExt};
use common_error::prelude::{Snafu, StatusCode};
use datafusion::error::DataFusionError;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::RawSchema;
use snafu::{Backtrace, ErrorCompat};

use crate::DeregisterTableRequest;
@@ -162,25 +161,18 @@ pub enum Error {
        source: table::error::Error,
    },

    #[snafu(display(
        "Invalid table schema in catalog entry, table:{}, schema: {:?}, source: {}",
        table_info,
        schema,
        source
    ))]
    InvalidTableSchema {
        table_info: String,
        schema: RawSchema,
        #[snafu(backtrace)]
        source: datatypes::error::Error,
    },

    #[snafu(display("Failure during SchemaProvider operation, source: {}", source))]
    SchemaProviderOperation {
        #[snafu(backtrace)]
        source: BoxedError,
    },

    #[snafu(display("{source}"))]
    Internal {
        #[snafu(backtrace)]
        source: BoxedError,
    },

    #[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
    SystemCatalogTableScanExec {
        #[snafu(backtrace)]
@@ -203,6 +195,12 @@ pub enum Error {
        #[snafu(backtrace)]
        source: datatypes::error::Error,
    },

    #[snafu(display("Failed to serialize or deserialize catalog entry: {}", source))]
    CatalogEntrySerde {
        #[snafu(backtrace)]
        source: common_catalog::error::Error,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -224,7 +222,9 @@ impl ErrorExt for Error {
            Error::SystemCatalogTypeMismatch { .. } => StatusCode::Internal,

            Error::ReadSystemCatalog { source, .. } => source.status_code(),
            Error::InvalidCatalogValue { source, .. } => source.status_code(),
            Error::InvalidCatalogValue { source, .. } | Error::CatalogEntrySerde { source } => {
                source.status_code()
            }

            Error::TableExists { .. } => StatusCode::TableAlreadyExists,
            Error::TableNotExist { .. } => StatusCode::TableNotFound,
@@ -240,9 +240,10 @@ impl ErrorExt for Error {
            Error::MetaSrv { source, .. } => source.status_code(),
            Error::SystemCatalogTableScan { source } => source.status_code(),
            Error::SystemCatalogTableScanExec { source } => source.status_code(),
            Error::InvalidTableSchema { source, .. } => source.status_code(),
            Error::InvalidTableInfoInCatalog { .. } => StatusCode::Unexpected,
            Error::SchemaProviderOperation { source } => source.status_code(),
            Error::InvalidTableInfoInCatalog { source } => source.status_code(),
            Error::SchemaProviderOperation { source } | Error::Internal { source } => {
                source.status_code()
            }

            Error::Unimplemented { .. } => StatusCode::Unsupported,
        }

@@ -24,10 +24,10 @@ use serde::{Deserialize, Serialize, Serializer};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::{RawTableInfo, TableId, TableVersion};

const CATALOG_KEY_PREFIX: &str = "__c";
const SCHEMA_KEY_PREFIX: &str = "__s";
const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";
pub const CATALOG_KEY_PREFIX: &str = "__c";
pub const SCHEMA_KEY_PREFIX: &str = "__s";
pub const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
pub const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";

const ALPHANUMERICS_NAME_PATTERN: &str = "[a-zA-Z_][a-zA-Z0-9_]*";

@@ -370,4 +370,10 @@ mod tests {
        let deserialized = TableGlobalValue::parse(serialized).unwrap();
        assert_eq!(value, deserialized);
    }

    #[test]
    fn test_table_global_value_compatibility() {
        let s = r#"{"node_id":1,"regions_id_map":{"1":[0]},"table_info":{"ident":{"table_id":1098,"version":1},"name":"container_cpu_limit","desc":"Created on insertion","catalog_name":"greptime","schema_name":"dd","meta":{"schema":{"column_schemas":[{"name":"container_id","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"container_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"docker_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"host","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_tag","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"interval","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"runtime","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"short_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"type","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"dd_value","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"ts","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}},{"name":"git.repository_url","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}}],"timestamp_index":11,"version":1},"primary_key_indices":[0,1,2,3,4,5,6,7,8,9,12],"value_indices":[10,11],"engine":"mito","next_column_id":12,"region_numbers":[],"engine_options":{},"options":{},"created_on":"1970-01-01T00:00:00Z"},"table_type":"Base"}}"#;
        TableGlobalValue::parse(s).unwrap();
    }
}

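Since the key prefixes are now public, other crates can recognize catalog metadata keys in the backing kv store; a hedged sketch of a hypothetical consumer (not part of this change set):

// Hypothetical helper: classify kv-store keys using the now-public prefixes.
fn is_catalog_metadata_key(key: &str) -> bool {
    key.starts_with(CATALOG_KEY_PREFIX)
        || key.starts_with(SCHEMA_KEY_PREFIX)
        || key.starts_with(TABLE_GLOBAL_KEY_PREFIX)
        || key.starts_with(TABLE_REGIONAL_KEY_PREFIX)
}
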
@@ -32,8 +32,8 @@ use table::TableRef;
use tokio::sync::Mutex;

use crate::error::{
    CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, InvalidTableSchemaSnafu,
    OpenTableSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
    CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, OpenTableSnafu, Result,
    SchemaNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
};
use crate::helper::{
    build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, CatalogValue,
@@ -346,21 +346,13 @@ impl RemoteCatalogManager {
        );

        let meta = &table_info.meta;
        let schema = meta
            .schema
            .clone()
            .try_into()
            .context(InvalidTableSchemaSnafu {
                table_info: format!("{catalog_name}.{schema_name}.{table_name}"),
                schema: meta.schema.clone(),
            })?;
        let req = CreateTableRequest {
            id: table_id,
            catalog_name: catalog_name.clone(),
            schema_name: schema_name.clone(),
            table_name: table_name.clone(),
            desc: None,
            schema: Arc::new(schema),
            schema: meta.schema.clone(),
            region_numbers: region_numbers.clone(),
            primary_key_indices: meta.primary_key_indices.clone(),
            create_if_not_exists: true,
@@ -430,11 +422,18 @@ impl CatalogManager for RemoteCatalogManager {
        Ok(true)
    }

    async fn deregister_table(&self, _request: DeregisterTableRequest) -> Result<bool> {
        UnimplementedSnafu {
            operation: "deregister table",
        }
        .fail()
    async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
        let catalog_name = &request.catalog;
        let schema_name = &request.schema;
        let schema = self
            .schema(catalog_name, schema_name)?
            .context(SchemaNotFoundSnafu {
                catalog: catalog_name,
                schema: schema_name,
            })?;

        let result = schema.deregister_table(&request.table_name)?;
        Ok(result.is_none())
    }

    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {

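With `deregister_table` now implemented, a hedged sketch of the call path, inside an async Result-returning context (names and values are illustrative):

// Sketch: ask the remote catalog manager to drop its registration of a table.
let request = DeregisterTableRequest {
    catalog: "greptime".to_string(),
    schema: "public".to_string(),
    table_name: "metrics".to_string(),
};
// Per the implementation above, Ok(true) means the schema no longer holds the table.
let dropped = catalog_manager.deregister_table(request).await?;
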
@@ -26,13 +26,15 @@ use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::debug;
use common_time::util;
use datatypes::prelude::{ConcreteDataType, ScalarVector, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaBuilder, SchemaRef};
use datatypes::schema::{ColumnSchema, RawSchema, SchemaRef};
use datatypes::vectors::{BinaryVector, TimestampMillisecondVector, UInt8Vector};
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::{TableId, TableInfoRef};
use table::requests::{CreateTableRequest, DeleteRequest, InsertRequest, OpenTableRequest};
use table::requests::{
    CreateTableRequest, DeleteRequest, InsertRequest, OpenTableRequest, TableOptions,
};
use table::{Table, TableRef};

use crate::error::{
@@ -88,7 +90,7 @@ impl SystemCatalogTable {
            table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
            table_id: SYSTEM_CATALOG_TABLE_ID,
        };
        let schema = Arc::new(build_system_catalog_schema());
        let schema = build_system_catalog_schema();
        let ctx = EngineContext::default();

        if let Some(table) = engine
@@ -105,11 +107,11 @@ impl SystemCatalogTable {
                schema_name: INFORMATION_SCHEMA_NAME.to_string(),
                table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
                desc: Some("System catalog table".to_string()),
                schema: schema.clone(),
                schema,
                region_numbers: vec![0],
                primary_key_indices: vec![ENTRY_TYPE_INDEX, KEY_INDEX],
                create_if_not_exists: true,
                table_options: HashMap::new(),
                table_options: TableOptions::default(),
            };

            let table = engine
@@ -143,7 +145,7 @@ impl SystemCatalogTable {
/// - value: JSON-encoded value of entry's metadata.
/// - gmt_created: create time of this metadata.
/// - gmt_modified: last updated time of this metadata.
fn build_system_catalog_schema() -> Schema {
fn build_system_catalog_schema() -> RawSchema {
    let cols = vec![
        ColumnSchema::new(
            "entry_type".to_string(),
@@ -178,8 +180,7 @@ fn build_system_catalog_schema() -> Schema {
        ),
    ];

    // The schema of this table must be valid.
    SchemaBuilder::try_from(cols).unwrap().build().unwrap()
    RawSchema::new(cols)
}

/// Formats key string for table entry in system catalog
@@ -398,7 +399,8 @@ mod tests {
    use log_store::NoopLogStore;
    use mito::config::EngineConfig;
    use mito::engine::MitoEngine;
    use object_store::ObjectStore;
    use object_store::{ObjectStore, ObjectStoreBuilder};
    use storage::compaction::noop::NoopCompactionScheduler;
    use storage::config::EngineConfig as StorageEngineConfig;
    use storage::EngineImpl;
    use table::metadata::TableType;
@@ -480,17 +482,19 @@ mod tests {
    pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
        let dir = TempDir::new("system-table-test").unwrap();
        let store_dir = dir.path().to_string_lossy();
        let accessor = object_store::backend::fs::Builder::default()
        let accessor = object_store::services::Fs::default()
            .root(&store_dir)
            .build()
            .unwrap();
        let object_store = ObjectStore::new(accessor);
        let object_store = ObjectStore::new(accessor).finish();
        let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
        let table_engine = Arc::new(MitoEngine::new(
            EngineConfig::default(),
            EngineImpl::new(
                StorageEngineConfig::default(),
                Arc::new(NoopLogStore::default()),
                object_store.clone(),
                noop_compaction_scheduler,
            ),
            object_store,
        ));

@@ -162,16 +162,10 @@ fn tables_to_record_batch(

    for table_name in table_names {
        // Safety: All these vectors are string type.
        catalog_vec
            .push_value_ref(ValueRef::String(catalog_name))
            .unwrap();
        schema_vec
            .push_value_ref(ValueRef::String(schema_name))
            .unwrap();
        table_name_vec
            .push_value_ref(ValueRef::String(&table_name))
            .unwrap();
        engine_vec.push_value_ref(ValueRef::String(engine)).unwrap();
        catalog_vec.push_value_ref(ValueRef::String(catalog_name));
        schema_vec.push_value_ref(ValueRef::String(schema_name));
        table_name_vec.push_value_ref(ValueRef::String(&table_name));
        engine_vec.push_value_ref(ValueRef::String(engine));
    }

    vec![

@@ -147,6 +147,7 @@ impl TableEngine for MockTableEngine {
        let table_id = TableId::from_str(
            request
                .table_options
                .extra_options
                .get("table_id")
                .unwrap_or(&default_table_id),
        )

@@ -28,7 +28,7 @@ mod tests {
    };
    use catalog::{CatalogList, CatalogManager, RegisterTableRequest};
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
    use datatypes::schema::Schema;
    use datatypes::schema::RawSchema;
    use futures_util::StreamExt;
    use table::engine::{EngineContext, TableEngineRef};
    use table::requests::CreateTableRequest;
@@ -116,7 +116,7 @@ mod tests {
        let schema_name = "nonexistent_schema".to_string();
        let table_name = "fail_table".to_string();
        // this schema has no effect
        let table_schema = Arc::new(Schema::new(vec![]));
        let table_schema = RawSchema::new(vec![]);
        let table = table_engine
            .create_table(
                &EngineContext {},
@@ -126,7 +126,7 @@ mod tests {
                schema_name: schema_name.clone(),
                table_name: table_name.clone(),
                desc: None,
                schema: table_schema.clone(),
                schema: table_schema,
                region_numbers: vec![0],
                primary_key_indices: vec![],
                create_if_not_exists: false,
@@ -176,7 +176,7 @@ mod tests {
        let table_name = "test_table".to_string();
        let table_id = 1;
        // this schema has no effect
        let table_schema = Arc::new(Schema::new(vec![]));
        let table_schema = RawSchema::new(vec![]);
        let table = table_engine
            .create_table(
                &EngineContext {},
@@ -186,7 +186,7 @@ mod tests {
                schema_name: schema_name.clone(),
                table_name: table_name.clone(),
                desc: None,
                schema: table_schema.clone(),
                schema: table_schema,
                region_numbers: vec![0],
                primary_key_indices: vec![],
                create_if_not_exists: false,
@@ -246,7 +246,7 @@ mod tests {
                schema_name: schema_name.clone(),
                table_name: "".to_string(),
                desc: None,
                schema: Arc::new(Schema::new(vec![])),
                schema: RawSchema::new(vec![]),
                region_numbers: vec![0],
                primary_key_indices: vec![],
                create_if_not_exists: false,

@@ -32,12 +32,8 @@ substrait = { path = "../common/substrait" }
tokio.workspace = true
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }

# TODO(ruihang): upgrade to 0.11 once substrait-rs supports it.
[dev-dependencies.prost_09]
package = "prost"
version = "0.9"
prost.workspace = true

[dev-dependencies.substrait_proto]
package = "substrait"
version = "0.2"
version = "0.4"

@@ -14,11 +14,12 @@

use api::v1::{ColumnDataType, ColumnDef, CreateTableExpr, TableId};
use client::{Client, Database};
use prost_09::Message;
use substrait_proto::protobuf::plan_rel::RelType as PlanRelType;
use substrait_proto::protobuf::read_rel::{NamedTable, ReadType};
use substrait_proto::protobuf::rel::RelType;
use substrait_proto::protobuf::{PlanRel, ReadRel, Rel};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use prost::Message;
use substrait_proto::proto::plan_rel::RelType as PlanRelType;
use substrait_proto::proto::read_rel::{NamedTable, ReadType};
use substrait_proto::proto::rel::RelType;
use substrait_proto::proto::{PlanRel, ReadRel, Rel};
use tracing::{event, Level};

fn main() {
@@ -65,7 +66,7 @@ async fn run() {
        region_ids: vec![0],
    };

    let db = Database::with_client(client);
    let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
    let result = db.create(create_table_expr).await.unwrap();
    event!(Level::INFO, "create table result: {:#?}", result);

@@ -88,12 +89,8 @@ fn mock_logical_plan() -> Vec<u8> {
    let read_type = ReadType::NamedTable(named_table);

    let read_rel = ReadRel {
        common: None,
        base_schema: None,
        filter: None,
        projection: None,
        advanced_extension: None,
        read_type: Some(read_type),
        ..Default::default()
    };

    let mut buf = vec![];

@@ -14,15 +14,15 @@

use std::str::FromStr;

use api::v1::auth_header::AuthScheme;
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
    AlterExpr, CreateTableExpr, DdlRequest, DropTableExpr, GreptimeRequest, InsertRequest,
    QueryRequest, RequestHeader,
    AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DropTableExpr, GreptimeRequest,
    InsertRequest, QueryRequest, RequestHeader,
};
use arrow_flight::{FlightData, Ticket};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::*;
use common_grpc::flight::{flight_messages_to_recordbatches, FlightDecoder, FlightMessage};
use common_query::Output;
@@ -42,6 +42,7 @@ pub struct Database {
    schema: String,

    client: Client,
    ctx: FlightContext,
}

impl Database {
@@ -50,17 +51,24 @@ impl Database {
            catalog: catalog.into(),
            schema: schema.into(),
            client,
            ctx: FlightContext::default(),
        }
    }

    pub fn with_client(client: Client) -> Self {
        Self::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client)
    pub fn set_catalog(&mut self, catalog: impl Into<String>) {
        self.catalog = catalog.into();
    }

    pub fn set_schema(&mut self, schema: impl Into<String>) {
        self.schema = schema.into();
    }

    pub fn set_auth(&mut self, auth: AuthScheme) {
        self.ctx.auth_header = Some(AuthHeader {
            auth_scheme: Some(auth),
        });
    }

    pub async fn insert(&self, request: InsertRequest) -> Result<Output> {
        self.do_get(Request::Insert(request)).await
    }
@@ -105,6 +113,7 @@ impl Database {
            header: Some(RequestHeader {
                catalog: self.catalog.clone(),
                schema: self.schema.clone(),
                authorization: self.ctx.auth_header.clone(),
            }),
            request: Some(request),
        };
@@ -164,12 +173,18 @@ fn get_metadata_value(e: &tonic::Status, key: &str) -> Option<String> {
        .and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}

#[derive(Default, Debug, Clone)]
pub struct FlightContext {
    auth_header: Option<AuthHeader>,
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use api::helper::ColumnDataTypeWrapper;
    use api::v1::Column;
    use api::v1::auth_header::AuthScheme;
    use api::v1::{AuthHeader, Basic, Column};
    use common_grpc::select::{null_mask, values};
    use common_grpc_expr::column_to_vector;
    use datatypes::prelude::{Vector, VectorRef};
@@ -179,6 +194,8 @@ mod tests {
        UInt32Vector, UInt64Vector, UInt8Vector,
    };

    use crate::database::FlightContext;

    #[test]
    fn test_column_to_vector() {
        let mut column = create_test_column(Arc::new(BooleanVector::from(vec![true])));
@@ -262,4 +279,26 @@ mod tests {
            datatype: wrapper.datatype() as i32,
        }
    }

    #[test]
    fn test_flight_ctx() {
        let mut ctx = FlightContext::default();
        assert!(ctx.auth_header.is_none());

        let basic = AuthScheme::Basic(Basic {
            username: "u".to_string(),
            password: "p".to_string(),
        });

        ctx.auth_header = Some(AuthHeader {
            auth_scheme: Some(basic),
        });

        assert!(matches!(
            ctx.auth_header,
            Some(AuthHeader {
                auth_scheme: Some(AuthScheme::Basic(_)),
            })
        ))
    }
}

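Putting the new `Database` surface together, a hedged sketch of an authenticated client inside an async Result-returning context (endpoint and credentials are illustrative):

// Sketch: build a client, scope it to a catalog/schema, and attach basic auth.
let client = Client::with_urls(["127.0.0.1:4001"]);
let mut db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
db.set_auth(AuthScheme::Basic(Basic {
    username: "user".to_string(),
    password: "pass".to_string(),
}));
// Every subsequent request now carries the AuthHeader in its RequestHeader.
let _output = db.sql("SELECT 1").await?;
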
@@ -18,6 +18,7 @@ mod error;
pub mod load_balance;

pub use api;
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};

pub use self::client::Client;
pub use self::database::Database;

@@ -12,15 +12,22 @@ path = "src/bin/greptime.rs"
[dependencies]
anymap = "1.0.0-beta.2"
clap = { version = "3.1", features = ["derive"] }
client = { path = "../client" }
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-telemetry = { path = "../common/telemetry", features = [
    "deadlock_detection",
] }
datanode = { path = "../datanode" }
either = "1.8"
frontend = { path = "../frontend" }
futures.workspace = true
meta-client = { path = "../meta-client" }
meta-srv = { path = "../meta-srv" }
nu-ansi-term = "0.46"
rustyline = "10.1"
serde.workspace = true
servers = { path = "../servers" }
snafu.workspace = true
@@ -28,6 +35,7 @@ tokio.workspace = true
toml = "0.5"

[dev-dependencies]
rexpect = "0.5"
serde.workspace = true
tempdir = "0.3"

@@ -16,7 +16,7 @@ use std::fmt;

use clap::Parser;
use cmd::error::Result;
use cmd::{datanode, frontend, metasrv, standalone};
use cmd::{cli, datanode, frontend, metasrv, standalone};
use common_telemetry::logging::{error, info};

#[derive(Parser)]
@@ -46,6 +46,8 @@ enum SubCommand {
    Metasrv(metasrv::Command),
    #[clap(name = "standalone")]
    Standalone(standalone::Command),
    #[clap(name = "cli")]
    Cli(cli::Command),
}

impl SubCommand {
@@ -55,6 +57,7 @@ impl SubCommand {
            SubCommand::Frontend(cmd) => cmd.run().await,
            SubCommand::Metasrv(cmd) => cmd.run().await,
            SubCommand::Standalone(cmd) => cmd.run().await,
            SubCommand::Cli(cmd) => cmd.run().await,
        }
    }
}
@@ -66,6 +69,7 @@ impl fmt::Display for SubCommand {
            SubCommand::Frontend(..) => write!(f, "greptime-frontend"),
            SubCommand::Metasrv(..) => write!(f, "greptime-metasrv"),
            SubCommand::Standalone(..) => write!(f, "greptime-standalone"),
            SubCommand::Cli(_) => write!(f, "greptime-cli"),
        }
    }
}

62
src/cmd/src/cli.rs
Normal file
@@ -0,0 +1,62 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod cmd;
mod helper;
mod repl;

use clap::Parser;
use repl::Repl;

use crate::error::Result;

#[derive(Parser)]
pub struct Command {
    #[clap(subcommand)]
    cmd: SubCommand,
}

impl Command {
    pub async fn run(self) -> Result<()> {
        self.cmd.run().await
    }
}

#[derive(Parser)]
enum SubCommand {
    Attach(AttachCommand),
}

impl SubCommand {
    async fn run(self) -> Result<()> {
        match self {
            SubCommand::Attach(cmd) => cmd.run().await,
        }
    }
}

#[derive(Debug, Parser)]
pub(crate) struct AttachCommand {
    #[clap(long)]
    pub(crate) grpc_addr: String,
    #[clap(long, action)]
    pub(crate) disable_helper: bool,
}

impl AttachCommand {
    async fn run(self) -> Result<()> {
        let mut repl = Repl::try_new(&self)?;
        repl.run().await
    }
}
154
src/cmd/src/cli/cmd.rs
Normal file
@@ -0,0 +1,154 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::{Error, InvalidReplCommandSnafu, Result};

/// Represents the parsed command from the user (which may span many lines)
#[derive(Debug, PartialEq)]
pub(crate) enum ReplCommand {
    Help,
    UseDatabase { db_name: String },
    Sql { sql: String },
    Exit,
}

impl TryFrom<&str> for ReplCommand {
    type Error = Error;

    fn try_from(input: &str) -> Result<Self> {
        let input = input.trim();
        if input.is_empty() {
            return InvalidReplCommandSnafu {
                reason: "No command specified".to_string(),
            }
            .fail();
        }

        // If a line ends with ';', it must be treated as a complete input.
        // However, the opposite is not true.
        let input_is_completed = input.ends_with(';');

        let input = input.strip_suffix(';').map(|x| x.trim()).unwrap_or(input);
        let lowercase = input.to_lowercase();
        match lowercase.as_str() {
            "help" => Ok(Self::Help),
            "exit" | "quit" => Ok(Self::Exit),
            _ => match input.split_once(' ') {
                Some((maybe_use, database)) if maybe_use.to_lowercase() == "use" => {
                    Ok(Self::UseDatabase {
                        db_name: database.trim().to_string(),
                    })
                }
                // Any valid SQL must contain at least one whitespace.
                Some(_) if input_is_completed => Ok(Self::Sql {
                    sql: input.to_string(),
                }),
                _ => InvalidReplCommandSnafu {
                    reason: format!("unknown command '{input}', maybe input is not completed"),
                }
                .fail(),
            },
        }
    }
}

impl ReplCommand {
    pub fn help() -> &'static str {
        r#"
Available commands (case insensitive):
- 'help': print this help
- 'exit' or 'quit': exit the REPL
- 'use <your database name>': switch to another database/schema context
- Any other text is treated as SQL.
  You can type it across multiple lines; just remember to end it with ';'.
"#
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::error::Error::InvalidReplCommand;

    #[test]
    fn test_from_str() {
        fn test_ok(s: &str, expected: ReplCommand) {
            let actual: ReplCommand = s.try_into().unwrap();
            assert_eq!(expected, actual, "'{}'", s);
        }

        fn test_err(s: &str) {
            let result: Result<ReplCommand> = s.try_into();
            assert!(matches!(result, Err(InvalidReplCommand { .. })))
        }

        test_err("");
        test_err(" ");
        test_err("\t");

        test_ok("help", ReplCommand::Help);
        test_ok("help", ReplCommand::Help);
        test_ok(" help", ReplCommand::Help);
        test_ok(" help ", ReplCommand::Help);
        test_ok(" HELP ", ReplCommand::Help);
        test_ok(" Help; ", ReplCommand::Help);
        test_ok(" help ; ", ReplCommand::Help);

        test_ok("exit", ReplCommand::Exit);
        test_ok("exit;", ReplCommand::Exit);
        test_ok("exit ;", ReplCommand::Exit);
        test_ok("EXIT", ReplCommand::Exit);

        test_ok("quit", ReplCommand::Exit);
        test_ok("quit;", ReplCommand::Exit);
        test_ok("quit ;", ReplCommand::Exit);
        test_ok("QUIT", ReplCommand::Exit);

        test_ok(
            "use Foo",
            ReplCommand::UseDatabase {
                db_name: "Foo".to_string(),
            },
        );
        test_ok(
            " use Foo ; ",
            ReplCommand::UseDatabase {
                db_name: "Foo".to_string(),
            },
        );
        // ensure that the database name stays case sensitive
        test_ok(
            " use FOO ; ",
            ReplCommand::UseDatabase {
                db_name: "FOO".to_string(),
            },
        );

        // ensure that we aren't messing with capitalization
        test_ok(
            "SELECT * from foo;",
            ReplCommand::Sql {
                sql: "SELECT * from foo".to_string(),
            },
        );
        // An input line that doesn't match any of the cases above must end
        // with ';' to be treated as valid SQL.
        test_err("insert blah");
        test_ok(
            "insert blah;",
            ReplCommand::Sql {
                sql: "insert blah".to_string(),
            },
        );
    }
}

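A hedged sketch of driving the parser directly, mirroring the tests above:

// Sketch: multi-word input ending with ';' parses as SQL; `use` switches context.
let cmd = ReplCommand::try_from("SELECT 1;").unwrap();
assert_eq!(
    ReplCommand::Sql {
        sql: "SELECT 1".to_string()
    },
    cmd
);
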
112
src/cmd/src/cli/helper.rs
Normal file
@@ -0,0 +1,112 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::borrow::Cow;

use rustyline::completion::Completer;
use rustyline::highlight::{Highlighter, MatchingBracketHighlighter};
use rustyline::hint::{Hinter, HistoryHinter};
use rustyline::validate::{ValidationContext, ValidationResult, Validator};

use crate::cli::cmd::ReplCommand;

pub(crate) struct RustylineHelper {
    hinter: HistoryHinter,
    highlighter: MatchingBracketHighlighter,
}

impl Default for RustylineHelper {
    fn default() -> Self {
        Self {
            hinter: HistoryHinter {},
            highlighter: MatchingBracketHighlighter::default(),
        }
    }
}

impl rustyline::Helper for RustylineHelper {}

impl Validator for RustylineHelper {
    fn validate(&self, ctx: &mut ValidationContext<'_>) -> rustyline::Result<ValidationResult> {
        let input = ctx.input();
        match ReplCommand::try_from(input) {
            Ok(_) => Ok(ValidationResult::Valid(None)),
            Err(e) => {
                if input.trim_end().ends_with(';') {
                    // If the line ends with ';', it HAS to be a valid command.
                    Ok(ValidationResult::Invalid(Some(e.to_string())))
                } else {
                    Ok(ValidationResult::Incomplete)
                }
            }
        }
    }
}

impl Hinter for RustylineHelper {
    type Hint = String;

    fn hint(&self, line: &str, pos: usize, ctx: &rustyline::Context<'_>) -> Option<Self::Hint> {
        self.hinter.hint(line, pos, ctx)
    }
}

impl Highlighter for RustylineHelper {
    fn highlight<'l>(&self, line: &'l str, pos: usize) -> Cow<'l, str> {
        self.highlighter.highlight(line, pos)
    }

    fn highlight_prompt<'b, 's: 'b, 'p: 'b>(
        &'s self,
        prompt: &'p str,
        default: bool,
    ) -> Cow<'b, str> {
        self.highlighter.highlight_prompt(prompt, default)
    }

    fn highlight_hint<'h>(&self, hint: &'h str) -> Cow<'h, str> {
        use nu_ansi_term::Style;
        Cow::Owned(Style::new().dimmed().paint(hint).to_string())
    }

    fn highlight_candidate<'c>(
        &self,
        candidate: &'c str,
        completion: rustyline::CompletionType,
    ) -> Cow<'c, str> {
        self.highlighter.highlight_candidate(candidate, completion)
    }

    fn highlight_char(&self, line: &str, pos: usize) -> bool {
        self.highlighter.highlight_char(line, pos)
    }
}

impl Completer for RustylineHelper {
    type Candidate = String;

    fn complete(
        &self,
        line: &str,
        pos: usize,
        ctx: &rustyline::Context<'_>,
    ) -> rustyline::Result<(usize, Vec<Self::Candidate>)> {
        // If there is a hint, use that as the auto-complete when the user hits `tab`
        if let Some(hint) = self.hinter.hint(line, pos, ctx) {
            Ok((pos, vec![hint]))
        } else {
            Ok((0, vec![]))
        }
    }
}
199
src/cmd/src/cli/repl.rs
Normal file
@@ -0,0 +1,199 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::path::PathBuf;
use std::time::Instant;

use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::ErrorExt;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::logging;
use either::Either;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use snafu::{ErrorCompat, ResultExt};

use crate::cli::cmd::ReplCommand;
use crate::cli::helper::RustylineHelper;
use crate::cli::AttachCommand;
use crate::error::{
    CollectRecordBatchesSnafu, PrettyPrintRecordBatchesSnafu, ReadlineSnafu, ReplCreationSnafu,
    RequestDatabaseSnafu, Result,
};

/// Captures the state of the repl, gathers commands and executes them one by one
pub(crate) struct Repl {
    /// Rustyline editor for interacting with the user on the command line
    rl: Editor<RustylineHelper>,

    /// Current prompt
    prompt: String,

    /// Client for interacting with GreptimeDB
    database: Database,
}

#[allow(clippy::print_stdout)]
impl Repl {
    fn print_help(&self) {
        println!("{}", ReplCommand::help())
    }

    pub(crate) fn try_new(cmd: &AttachCommand) -> Result<Self> {
        let mut rl = Editor::new().context(ReplCreationSnafu)?;

        if !cmd.disable_helper {
            rl.set_helper(Some(RustylineHelper::default()));

            let history_file = history_file();
            if let Err(e) = rl.load_history(&history_file) {
                logging::debug!(
                    "failed to load history file on {}, error: {e}",
                    history_file.display()
                );
            }
        }

        let client = Client::with_urls([&cmd.grpc_addr]);
        let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);

        Ok(Self {
            rl,
            prompt: "> ".to_string(),
            database,
        })
    }

    /// Parse the next command
    fn next_command(&mut self) -> Result<ReplCommand> {
        match self.rl.readline(&self.prompt) {
            Ok(ref line) => {
                let request = line.trim();

                self.rl.add_history_entry(request.to_string());

                request.try_into()
            }
            Err(ReadlineError::Eof) | Err(ReadlineError::Interrupted) => Ok(ReplCommand::Exit),
            // Some sort of real underlying error
            Err(e) => Err(e).context(ReadlineSnafu),
        }
    }

    /// Read Evaluate Print Loop (interactive command line) for GreptimeDB
    ///
    /// Inspired by / based on repl.rs from InfluxDB IOX
    pub(crate) async fn run(&mut self) -> Result<()> {
        println!("Ready for commands. (Hint: try 'help')");

        loop {
            match self.next_command()? {
                ReplCommand::Help => {
                    self.print_help();
                }
                ReplCommand::UseDatabase { db_name } => {
                    if self.execute_sql(format!("USE {db_name}")).await {
                        println!("Using {db_name}");
                        self.database.set_schema(&db_name);
                        self.prompt = format!("[{db_name}] > ");
                    }
                }
                ReplCommand::Sql { sql } => {
                    self.execute_sql(sql).await;
                }
                ReplCommand::Exit => {
                    return Ok(());
                }
            }
        }
    }

    async fn execute_sql(&self, sql: String) -> bool {
        self.do_execute_sql(sql)
            .await
            .map_err(|e| {
                let status_code = e.status_code();
                let root_cause = e.iter_chain().last().unwrap();
                println!("Error: {}({status_code}), {root_cause}", status_code as u32)
            })
            .is_ok()
    }

    async fn do_execute_sql(&self, sql: String) -> Result<()> {
        let start = Instant::now();

        let output = self
            .database
            .sql(&sql)
            .await
            .context(RequestDatabaseSnafu { sql: &sql })?;

        let either = match output {
            Output::Stream(s) => {
                let x = RecordBatches::try_collect(s)
                    .await
                    .context(CollectRecordBatchesSnafu)?;
                Either::Left(x)
            }
            Output::RecordBatches(x) => Either::Left(x),
            Output::AffectedRows(rows) => Either::Right(rows),
        };

        let end = Instant::now();

        match either {
            Either::Left(recordbatches) => {
                let total_rows: usize = recordbatches.iter().map(|x| x.num_rows()).sum();
                if total_rows > 0 {
                    println!(
                        "{}",
                        recordbatches
                            .pretty_print()
                            .context(PrettyPrintRecordBatchesSnafu)?
                    );
                }
                println!("Total Rows: {total_rows}")
            }
            Either::Right(rows) => println!("Affected Rows: {rows}"),
        };

        println!("Cost {} ms", (end - start).as_millis());
        Ok(())
    }
}

impl Drop for Repl {
    fn drop(&mut self) {
        if self.rl.helper().is_some() {
            let history_file = history_file();
            if let Err(e) = self.rl.save_history(&history_file) {
                logging::debug!(
                    "failed to save history file on {}, error: {e}",
                    history_file.display()
                );
            }
        }
    }
}

/// Return the location of the history file (defaults to $HOME/".greptimedb_cli_history")
fn history_file() -> PathBuf {
    let mut buf = match std::env::var("HOME") {
        Ok(home) => PathBuf::from(home),
        Err(_) => PathBuf::new(),
    };
    buf.push(".greptimedb_cli_history");
    buf
}

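One consequence of deriving the path from $HOME is that tests (or users) can redirect the REPL history by overriding the environment variable; a hedged sketch:

// Sketch: point the REPL's history file somewhere else via $HOME.
std::env::set_var("HOME", "/tmp/greptime-repl-home"); // illustrative path
assert!(history_file().ends_with(".greptimedb_cli_history"));
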
@@ -14,8 +14,10 @@

use clap::Parser;
use common_telemetry::logging;
use datanode::datanode::{Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig};
use meta_client::MetaClientOpts;
use datanode::datanode::{
    Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig, ProcedureConfig,
};
use meta_client::MetaClientOptions;
use servers::Mode;
use snafu::ResultExt;

@@ -65,6 +67,8 @@ struct StartCommand {
    data_dir: Option<String>,
    #[clap(long)]
    wal_dir: Option<String>,
    #[clap(long)]
    procedure_dir: Option<String>,
}

impl StartCommand {
@@ -110,8 +114,8 @@ impl TryFrom<StartCommand> for DatanodeOptions {
        }

        if let Some(meta_addr) = cmd.metasrv_addr {
            opts.meta_client_opts
                .get_or_insert_with(MetaClientOpts::default)
            opts.meta_client_options
                .get_or_insert_with(MetaClientOptions::default)
                .metasrv_addrs = meta_addr
                .split(',')
                .map(&str::trim)
@@ -134,6 +138,11 @@ impl TryFrom<StartCommand> for DatanodeOptions {
        if let Some(wal_dir) = cmd.wal_dir {
            opts.wal.dir = wal_dir;
        }

        if let Some(procedure_dir) = cmd.procedure_dir {
            opts.procedure = Some(ProcedureConfig::from_file_path(procedure_dir));
        }

        Ok(opts)
    }
}
@@ -143,7 +152,7 @@ mod tests {
    use std::assert_matches::assert_matches;
    use std::time::Duration;

    use datanode::datanode::ObjectStoreConfig;
    use datanode::datanode::{CompactionConfig, ObjectStoreConfig};
    use servers::Mode;

    use super::*;
@@ -162,12 +171,12 @@ mod tests {
        assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal.dir);
        assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
        assert_eq!(4, options.mysql_runtime_size);
        let MetaClientOpts {
        let MetaClientOptions {
            metasrv_addrs: metasrv_addr,
            timeout_millis,
            connect_timeout_millis,
            tcp_nodelay,
        } = options.meta_client_opts.unwrap();
        } = options.meta_client_options.unwrap();

        assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
        assert_eq!(5000, connect_timeout_millis);
@@ -181,6 +190,15 @@ mod tests {
            ObjectStoreConfig::S3 { .. } => unreachable!(),
            ObjectStoreConfig::Oss { .. } => unreachable!(),
        };

        assert_eq!(
            CompactionConfig {
                max_inflight_tasks: 4,
                max_files_in_level0: 16,
                max_purge_tasks: 32,
            },
            options.compaction
        );
    }

    #[test]
@@ -231,12 +249,12 @@ mod tests {
        assert_eq!(1024 * 1024 * 1024 * 50, dn_opts.wal.purge_threshold.0);
        assert!(!dn_opts.wal.sync_write);
        assert_eq!(Some(42), dn_opts.node_id);
        let MetaClientOpts {
        let MetaClientOptions {
            metasrv_addrs: metasrv_addr,
            timeout_millis,
            connect_timeout_millis,
            tcp_nodelay,
        } = dn_opts.meta_client_opts.unwrap();
        } = dn_opts.meta_client_options.unwrap();
        assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
        assert_eq!(3000, timeout_millis);
        assert_eq!(5000, connect_timeout_millis);

@@ -15,6 +15,7 @@
use std::any::Any;

use common_error::prelude::*;
use rustyline::error::ReadlineError;

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -68,6 +69,40 @@ pub enum Error {
        #[snafu(backtrace)]
        source: meta_srv::error::Error,
    },

    #[snafu(display("Invalid REPL command: {reason}"))]
    InvalidReplCommand { reason: String },

    #[snafu(display("Cannot create REPL: {}", source))]
    ReplCreation {
        source: ReadlineError,
        backtrace: Backtrace,
    },

    #[snafu(display("Error reading command: {}", source))]
    Readline {
        source: ReadlineError,
        backtrace: Backtrace,
    },

    #[snafu(display("Failed to request database, sql: {sql}, source: {source}"))]
    RequestDatabase {
        sql: String,
        #[snafu(backtrace)]
        source: client::Error,
    },

    #[snafu(display("Failed to collect RecordBatches, source: {source}"))]
    CollectRecordBatches {
        #[snafu(backtrace)]
        source: common_recordbatch::error::Error,
    },

    #[snafu(display("Failed to pretty print Recordbatches, source: {source}"))]
    PrettyPrintRecordBatches {
        #[snafu(backtrace)]
        source: common_recordbatch::error::Error,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -82,8 +117,15 @@ impl ErrorExt for Error {
            Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
                StatusCode::InvalidArguments
            }
            Error::IllegalConfig { .. } => StatusCode::InvalidArguments,
            Error::IllegalConfig { .. } | Error::InvalidReplCommand { .. } => {
                StatusCode::InvalidArguments
            }
            Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
            Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
            Error::RequestDatabase { source, .. } => source.status_code(),
            Error::CollectRecordBatches { source } | Error::PrettyPrintRecordBatches { source } => {
                source.status_code()
            }
        }
    }

@@ -15,6 +15,7 @@
use std::sync::Arc;

use clap::Parser;
use common_base::Plugins;
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::grpc::GrpcOptions;
use frontend::influxdb::InfluxdbOptions;
@@ -22,8 +23,7 @@ use frontend::instance::Instance;
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use frontend::Plugins;
use meta_client::MetaClientOpts;
use meta_client::MetaClientOptions;
use servers::auth::UserProviderRef;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -91,10 +91,9 @@ impl StartCommand {
        let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
        let opts: FrontendOptions = self.try_into()?;

        let mut instance = Instance::try_new_distributed(&opts)
        let instance = Instance::try_new_distributed(&opts, plugins.clone())
            .await
            .context(error::StartFrontendSnafu)?;
        instance.set_plugins(plugins.clone());

        let mut frontend = Frontend::new(opts, instance, plugins);
        frontend.start().await.context(error::StartFrontendSnafu)
@@ -159,8 +158,8 @@ impl TryFrom<StartCommand> for FrontendOptions {
            opts.influxdb_options = Some(InfluxdbOptions { enable });
        }
        if let Some(metasrv_addr) = cmd.metasrv_addr {
            opts.meta_client_opts
                .get_or_insert_with(MetaClientOpts::default)
            opts.meta_client_options
                .get_or_insert_with(MetaClientOptions::default)
                .metasrv_addrs = metasrv_addr
                .split(',')
                .map(&str::trim)

@@ -14,6 +14,7 @@

#![feature(assert_matches)]

pub mod cli;
pub mod datanode;
pub mod error;
pub mod frontend;

@@ -15,8 +15,11 @@
use std::sync::Arc;

use clap::Parser;
use common_base::Plugins;
use common_telemetry::info;
use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig, WalConfig};
use datanode::datanode::{
    CompactionConfig, Datanode, DatanodeOptions, ObjectStoreConfig, ProcedureConfig, WalConfig,
};
use datanode::instance::InstanceRef;
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::grpc::GrpcOptions;
@@ -27,7 +30,6 @@ use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use frontend::prometheus::PrometheusOptions;
use frontend::promql::PromqlOptions;
use frontend::Plugins;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -66,6 +68,8 @@ impl SubCommand {
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct StandaloneOptions {
    pub mode: Mode,
    pub enable_memory_catalog: bool,
    pub http_options: Option<HttpOptions>,
    pub grpc_options: Option<GrpcOptions>,
    pub mysql_options: Option<MysqlOptions>,
@@ -74,15 +78,17 @@ pub struct StandaloneOptions {
    pub influxdb_options: Option<InfluxdbOptions>,
    pub prometheus_options: Option<PrometheusOptions>,
    pub promql_options: Option<PromqlOptions>,
    pub mode: Mode,
    pub wal: WalConfig,
    pub storage: ObjectStoreConfig,
    pub enable_memory_catalog: bool,
    pub compaction: CompactionConfig,
    pub procedure: Option<ProcedureConfig>,
}

impl Default for StandaloneOptions {
    fn default() -> Self {
        Self {
            mode: Mode::Standalone,
            enable_memory_catalog: false,
            http_options: Some(HttpOptions::default()),
            grpc_options: Some(GrpcOptions::default()),
            mysql_options: Some(MysqlOptions::default()),
@@ -91,10 +97,10 @@ impl Default for StandaloneOptions {
            influxdb_options: Some(InfluxdbOptions::default()),
            prometheus_options: Some(PrometheusOptions::default()),
            promql_options: Some(PromqlOptions::default()),
            mode: Mode::Standalone,
            wal: WalConfig::default(),
            storage: ObjectStoreConfig::default(),
            enable_memory_catalog: false,
            compaction: CompactionConfig::default(),
            procedure: None,
        }
    }
}
@@ -102,6 +108,7 @@ impl Default for StandaloneOptions {
impl StandaloneOptions {
    fn frontend_options(self) -> FrontendOptions {
        FrontendOptions {
            mode: self.mode,
            http_options: self.http_options,
            grpc_options: self.grpc_options,
            mysql_options: self.mysql_options,
@@ -110,16 +117,17 @@ impl StandaloneOptions {
            influxdb_options: self.influxdb_options,
            prometheus_options: self.prometheus_options,
            promql_options: self.promql_options,
            mode: self.mode,
            meta_client_opts: None,
            meta_client_options: None,
        }
    }

    fn datanode_options(self) -> DatanodeOptions {
        DatanodeOptions {
            enable_memory_catalog: self.enable_memory_catalog,
            wal: self.wal,
            storage: self.storage,
            enable_memory_catalog: self.enable_memory_catalog,
            compaction: self.compaction,
            procedure: self.procedure,
            ..Default::default()
        }
    }
@@ -362,4 +370,11 @@ mod tests {
        .await;
        assert!(result.is_ok());
    }

    #[test]
    fn test_toml() {
        let opts = StandaloneOptions::default();
        let toml_string = toml::to_string(&opts).unwrap();
        let _parsed: StandaloneOptions = toml::from_str(&toml_string).unwrap();
    }
}
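Because `StandaloneOptions` now derives `Deserialize` with `#[serde(default)]`, a config file only has to name the fields it overrides; everything else falls back to `Default`. A minimal sketch of that behavior (the TOML snippet here is illustrative, not a shipped config file):

    // Sketch: with #[serde(default)], a partial TOML file still
    // deserializes; fields not present fall back to Default.
    let partial = "enable_memory_catalog = true";
    let opts: StandaloneOptions = toml::from_str(partial).unwrap();
    assert!(opts.enable_memory_catalog);
    assert!(opts.http_options.is_some()); // defaulted, not None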
src/cmd/tests/cli.rs (new file, 145 lines)
@@ -0,0 +1,145 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#[cfg(target_os = "macos")]
mod tests {
    use std::path::PathBuf;
    use std::process::{Command, Stdio};
    use std::time::Duration;

    use rexpect::session::PtyReplSession;
    use tempdir::TempDir;

    struct Repl {
        repl: PtyReplSession,
    }

    impl Repl {
        fn send_line(&mut self, line: &str) {
            self.repl.send_line(line).unwrap();

            // read a line to consume the prompt
            self.read_line();
        }

        fn read_line(&mut self) -> String {
            self.repl.read_line().unwrap()
        }

        fn read_expect(&mut self, expect: &str) {
            assert_eq!(self.read_line(), expect);
        }

        fn read_contains(&mut self, pat: &str) {
            assert!(self.read_line().contains(pat));
        }
    }

    #[test]
    fn test_repl() {
        let data_dir = TempDir::new_in("/tmp", "data").unwrap();
        let wal_dir = TempDir::new_in("/tmp", "wal").unwrap();

        let mut bin_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
        bin_path.push("../../target/debug");
        let bin_path = bin_path.to_str().unwrap();

        let mut datanode = Command::new("./greptime")
            .current_dir(bin_path)
            .args([
                "datanode",
                "start",
                "--rpc-addr=0.0.0.0:4321",
                "--node-id=1",
                &format!("--data-dir={}", data_dir.path().display()),
                &format!("--wal-dir={}", wal_dir.path().display()),
            ])
            .stdout(Stdio::null())
            .spawn()
            .unwrap();

        // wait for the Datanode to actually start
        std::thread::sleep(Duration::from_secs(3));

        let mut repl_cmd = Command::new("./greptime");
        repl_cmd.current_dir(bin_path).args([
            "--log-level=off",
            "cli",
            "attach",
            "--grpc-addr=0.0.0.0:4321",
            // history commands can sneak into stdout and mess up our tests, so disable the helper
            "--disable-helper",
        ]);
        let pty_session = rexpect::session::spawn_command(repl_cmd, Some(5_000)).unwrap();
        let repl = PtyReplSession {
            prompt: "> ".to_string(),
            pty_session,
            quit_command: None,
            echo_on: false,
        };
        let repl = &mut Repl { repl };
        repl.read_expect("Ready for commands. (Hint: try 'help')");

        test_create_database(repl);

        test_use_database(repl);

        test_create_table(repl);

        test_insert(repl);

        test_select(repl);

        datanode.kill().unwrap();
        datanode.wait().unwrap();
    }

    fn test_create_database(repl: &mut Repl) {
        repl.send_line("CREATE DATABASE db;");
        repl.read_expect("Affected Rows: 1");
        repl.read_contains("Cost");
    }

    fn test_use_database(repl: &mut Repl) {
        repl.send_line("USE db");
        repl.read_expect("Total Rows: 0");
        repl.read_contains("Cost");
        repl.read_expect("Using db");
    }

    fn test_create_table(repl: &mut Repl) {
        repl.send_line("CREATE TABLE t(x STRING, ts TIMESTAMP TIME INDEX);");
        repl.read_expect("Affected Rows: 0");
        repl.read_contains("Cost");
    }

    fn test_insert(repl: &mut Repl) {
        repl.send_line("INSERT INTO t(x, ts) VALUES ('hello', 1676895812239);");
        repl.read_expect("Affected Rows: 1");
        repl.read_contains("Cost");
    }

    fn test_select(repl: &mut Repl) {
        repl.send_line("SELECT * FROM t;");

        repl.read_expect("+-------+-------------------------+");
        repl.read_expect("| x     | ts                      |");
        repl.read_expect("+-------+-------------------------+");
        repl.read_expect("| hello | 2023-02-20T12:23:32.239 |");
        repl.read_expect("+-------+-------------------------+");
        repl.read_expect("Total Rows: 1");

        repl.read_contains("Cost");
    }
}
@@ -5,6 +5,7 @@ edition.workspace = true
license.workspace = true

[dependencies]
anymap = "1.0.0-beta.2"
bitvec = "1.0"
bytes = { version = "1.1", features = ["serde"] }
common-error = { path = "../error" }
@@ -19,3 +19,5 @@ pub mod bytes;
pub mod readable_size;

pub use bit_vec::BitVec;

pub type Plugins = anymap::Map<dyn core::any::Any + Send + Sync>;
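`Plugins` is a type-keyed map from the `anymap` crate, so each component can stash and retrieve its own plugin by concrete type. A minimal usage sketch (the `AuthPlugin` type here is hypothetical, purely for illustration):

    use common_base::Plugins;

    // Hypothetical plugin type; any Send + Sync + 'static type works.
    struct AuthPlugin {
        provider: String,
    }

    let mut plugins = Plugins::new();
    plugins.insert(AuthPlugin {
        provider: "example".to_string(),
    });

    // Later, look the plugin up again by its concrete type.
    if let Some(auth) = plugins.get::<AuthPlugin>() {
        assert_eq!(auth.provider, "example");
    }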
@@ -16,6 +16,6 @@ serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }

[dev-dependencies]
chrono = "0.4"
chrono.workspace = true
tempdir = "0.3"
tokio.workspace = true
@@ -86,6 +86,34 @@ impl StatusCode {
    pub fn is_success(code: u32) -> bool {
        Self::Success as u32 == code
    }

    pub fn is_retryable(&self) -> bool {
        match self {
            StatusCode::StorageUnavailable
            | StatusCode::RuntimeResourcesExhausted
            | StatusCode::Internal => true,

            StatusCode::Success
            | StatusCode::Unknown
            | StatusCode::Unsupported
            | StatusCode::Unexpected
            | StatusCode::InvalidArguments
            | StatusCode::InvalidSyntax
            | StatusCode::PlanQuery
            | StatusCode::EngineExecuteQuery
            | StatusCode::TableAlreadyExists
            | StatusCode::TableNotFound
            | StatusCode::TableColumnNotFound
            | StatusCode::TableColumnExists
            | StatusCode::DatabaseNotFound
            | StatusCode::UserNotFound
            | StatusCode::UnsupportedPasswordType
            | StatusCode::UserPasswordMismatch
            | StatusCode::AuthHeaderNotFound
            | StatusCode::InvalidAuthHeader
            | StatusCode::AccessDenied => false,
        }
    }
}

impl fmt::Display for StatusCode {
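The new `is_retryable` helper above lets callers decide whether to re-attempt an operation based only on the error's status code. A minimal sketch of such a caller (the generic `operation` closure and the retry bound are illustrative choices, not part of this diff):

    use common_error::prelude::ErrorExt;

    // Sketch: generic retry loop driven by StatusCode::is_retryable.
    fn call_with_retry<T, E, F>(mut operation: F, max_attempts: usize) -> Result<T, E>
    where
        E: ErrorExt,
        F: FnMut() -> Result<T, E>,
    {
        let mut attempt = 1;
        loop {
            match operation() {
                Ok(value) => return Ok(value),
                // Retry only when the status code says the failure is transient.
                Err(e) if e.status_code().is_retryable() && attempt < max_attempts => {
                    attempt += 1;
                }
                Err(e) => return Err(e),
            }
        }
    }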
@@ -20,6 +20,7 @@ use static_assertions::{assert_fields, assert_impl_all};
struct Foo {}

#[test]
#[allow(clippy::extra_unused_type_parameters)]
fn test_derive() {
    Foo::default();
    assert_fields!(Foo: input_types);
@@ -19,7 +19,7 @@ num-traits = "0.2"
once_cell = "1.10"
paste = "1.0"
snafu.workspace = true
statrs = "0.15"
statrs = "0.16"

[dev-dependencies]
ron = "0.7"
@@ -12,19 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use api::v1::alter_expr::Kind;
use api::v1::{AlterExpr, CreateTableExpr, DropColumns, RenameTable};
use api::v1::{column_def, AlterExpr, CreateTableExpr, DropColumns, RenameTable};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
use datatypes::schema::{ColumnSchema, RawSchema};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableId;
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, CreateTableRequest};
use table::requests::{
    AddColumnRequest, AlterKind, AlterTableRequest, CreateTableRequest, TableOptions,
};

use crate::error::{
    ColumnNotFoundSnafu, CreateSchemaSnafu, InvalidColumnDefSnafu, MissingFieldSnafu,
    MissingTimestampColumnSnafu, Result,
    ColumnNotFoundSnafu, InvalidColumnDefSnafu, MissingFieldSnafu, MissingTimestampColumnSnafu,
    Result, UnrecognizedTableOptionSnafu,
};

/// Convert an [`AlterExpr`] to an [`AlterTableRequest`]
@@ -42,12 +42,11 @@ pub fn alter_expr_to_request(expr: AlterExpr) -> Result<AlterTableRequest> {
                field: "column_def",
            })?;

            let schema =
                column_def
                    .try_as_column_schema()
                    .context(InvalidColumnDefSnafu {
                        column: &column_def.name,
                    })?;
            let schema = column_def::try_as_column_schema(&column_def).context(
                InvalidColumnDefSnafu {
                    column: &column_def.name,
                },
            )?;
            Ok(AddColumnRequest {
                column_schema: schema,
                is_key: ac.is_key,
@@ -93,13 +92,12 @@ pub fn alter_expr_to_request(expr: AlterExpr) -> Result<AlterTableRequest> {
    }
}

pub fn create_table_schema(expr: &CreateTableExpr) -> Result<SchemaRef> {
pub fn create_table_schema(expr: &CreateTableExpr) -> Result<RawSchema> {
    let column_schemas = expr
        .column_defs
        .iter()
        .map(|x| {
            x.try_as_column_schema()
                .context(InvalidColumnDefSnafu { column: &x.name })
            column_def::try_as_column_schema(x).context(InvalidColumnDefSnafu { column: &x.name })
        })
        .collect::<Result<Vec<ColumnSchema>>>()?;

@@ -123,12 +121,7 @@ pub fn create_table_schema(expr: &CreateTableExpr) -> Result<SchemaRef> {
        })
        .collect::<Vec<_>>();

    Ok(Arc::new(
        SchemaBuilder::try_from(column_schemas)
            .context(CreateSchemaSnafu)?
            .build()
            .context(CreateSchemaSnafu)?,
    ))
    Ok(RawSchema::new(column_schemas))
}

pub fn create_expr_to_request(
@@ -140,8 +133,11 @@ pub fn create_expr_to_request(
        .primary_keys
        .iter()
        .map(|key| {
            // We do a linear search here.
            schema
                .column_index_by_name(key)
                .column_schemas
                .iter()
                .position(|column_schema| column_schema.name == *key)
                .context(ColumnNotFoundSnafu {
                    column_name: key,
                    table_name: &expr.table_name,
@@ -169,6 +165,8 @@ pub fn create_expr_to_request(
        expr.region_ids
    };

    let table_options =
        TableOptions::try_from(&expr.table_options).context(UnrecognizedTableOptionSnafu)?;
    Ok(CreateTableRequest {
        id: table_id,
        catalog_name,
@@ -179,7 +177,7 @@ pub fn create_expr_to_request(
        region_numbers: region_ids,
        primary_key_indices,
        create_if_not_exists: expr.create_if_not_exists,
        table_options: expr.table_options,
        table_options,
    })
}
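The change above has `create_table_schema` return a plain `RawSchema` built straight from the column schemas, with no builder or `Arc` in the way. A minimal construction sketch (the column names are made up, and the exact `ColumnSchema`/`ConcreteDataType` constructor signatures are assumptions, not taken from this diff):

    use datatypes::prelude::ConcreteDataType;
    use datatypes::schema::{ColumnSchema, RawSchema};

    // Sketch: build a RawSchema directly from column schemas.
    let columns = vec![
        ColumnSchema::new("host", ConcreteDataType::string_datatype(), false),
        ColumnSchema::new("cpu", ConcreteDataType::float64_datatype(), true),
    ];
    let schema = RawSchema::new(columns);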
@@ -40,12 +40,6 @@ pub enum Error {
        source: api::error::Error,
    },

    #[snafu(display("Failed to create schema when creating table, source: {}", source))]
    CreateSchema {
        #[snafu(backtrace)]
        source: datatypes::error::Error,
    },

    #[snafu(display(
        "Duplicated timestamp column in gRPC requests, exists {}, duplicated: {}",
        exists,
@@ -90,6 +84,12 @@ pub enum Error {
        #[snafu(backtrace)]
        source: api::error::Error,
    },

    #[snafu(display("Unrecognized table option: {}", source))]
    UnrecognizedTableOption {
        #[snafu(backtrace)]
        source: table::error::Error,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -102,14 +102,15 @@ impl ErrorExt for Error {
                StatusCode::InvalidArguments
            }
            Error::ColumnDataType { .. } => StatusCode::Internal,
            Error::CreateSchema { .. }
            | Error::DuplicatedTimestampColumn { .. }
            | Error::MissingTimestampColumn { .. } => StatusCode::InvalidArguments,
            Error::DuplicatedTimestampColumn { .. } | Error::MissingTimestampColumn { .. } => {
                StatusCode::InvalidArguments
            }
            Error::InvalidColumnProto { .. } => StatusCode::InvalidArguments,
            Error::CreateVector { .. } => StatusCode::InvalidArguments,
            Error::MissingField { .. } => StatusCode::InvalidArguments,
            Error::ColumnDefaultConstraint { source, .. } => source.status_code(),
            Error::InvalidColumnDef { source, .. } => source.status_code(),
            Error::UnrecognizedTableOption { .. } => StatusCode::InvalidArguments,
        }
    }
    fn backtrace_opt(&self) -> Option<&Backtrace> {
@@ -96,9 +96,7 @@ pub fn column_to_vector(column: &Column, rows: u32) -> Result<VectorRef> {

        for i in 0..rows {
            if let Some(true) = nulls_iter.next() {
                vector
                    .push_value_ref(ValueRef::Null)
                    .context(CreateVectorSnafu)?;
                vector.push_null();
            } else {
                let value_ref = values_iter
                    .next()
@@ -109,16 +107,12 @@ pub fn column_to_vector(column: &Column, rows: u32) -> Result<VectorRef> {
                    ),
                })?;
                vector
                    .push_value_ref(value_ref)
                    .try_push_value_ref(value_ref)
                    .context(CreateVectorSnafu)?;
            }
        }
    } else {
        (0..rows).try_for_each(|_| {
            vector
                .push_value_ref(ValueRef::Null)
                .context(CreateVectorSnafu)
        })?;
        (0..rows).for_each(|_| vector.push_null());
    }
    Ok(vector.to_vector())
}
@@ -324,7 +318,7 @@ fn add_values_to_builder(

    values.iter().try_for_each(|value| {
        builder
            .push_value_ref(value.as_value_ref())
            .try_push_value_ref(value.as_value_ref())
            .context(CreateVectorSnafu)
    })?;
} else {
@@ -337,12 +331,10 @@ fn add_values_to_builder(
    let mut idx_of_values = 0;
    for idx in 0..row_count {
        match is_null(&null_mask, idx) {
            Some(true) => builder
                .push_value_ref(ValueRef::Null)
                .context(CreateVectorSnafu)?,
            Some(true) => builder.push_null(),
            _ => {
                builder
                    .push_value_ref(values[idx_of_values].as_value_ref())
                    .try_push_value_ref(values[idx_of_values].as_value_ref())
                    .context(CreateVectorSnafu)?;
                idx_of_values += 1
            }
@@ -427,8 +419,9 @@ fn convert_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> {
            .into_iter()
            .map(|v| Value::Timestamp(Timestamp::new_millisecond(v)))
            .collect(),
        ConcreteDataType::Null(_) => unreachable!(),
        ConcreteDataType::List(_) => unreachable!(),
        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
            unreachable!()
        }
    }
}
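The switch from `push_value_ref(ValueRef::Null)` to an infallible `push_null` (with a fallible `try_push_value_ref` for real values) reflects a common builder split: appending a null can never fail, while appending a value may mismatch the builder's type. A minimal sketch of the same idea on a hand-rolled builder (all names here are illustrative, not the real `MutableVector` API):

    // Illustrative builder, not the datatypes MutableVector API.
    struct I64Builder {
        values: Vec<Option<i64>>,
    }

    impl I64Builder {
        // Appending a null cannot fail, so no Result is needed.
        fn push_null(&mut self) {
            self.values.push(None);
        }

        // Appending a value can fail, e.g. on a type mismatch.
        fn try_push_value(&mut self, value: &str) -> Result<(), String> {
            value
                .parse::<i64>()
                .map(|v| self.values.push(Some(v)))
                .map_err(|e| e.to_string())
        }
    }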
@@ -18,18 +18,20 @@ use std::time::Duration;

use dashmap::mapref::entry::Entry;
use dashmap::DashMap;
use snafu::ResultExt;
use tonic::transport::{Channel as InnerChannel, Endpoint, Uri};
use snafu::{OptionExt, ResultExt};
use tonic::transport::{
    Certificate, Channel as InnerChannel, ClientTlsConfig, Endpoint, Identity, Uri,
};
use tower::make::MakeConnection;

use crate::error;
use crate::error::Result;
use crate::error::{CreateChannelSnafu, InvalidConfigFilePathSnafu, InvalidTlsConfigSnafu, Result};

const RECYCLE_CHANNEL_INTERVAL_SECS: u64 = 60;

#[derive(Clone, Debug)]
pub struct ChannelManager {
    config: ChannelConfig,
    client_tls_config: Option<ClientTlsConfig>,
    pool: Arc<Pool>,
}

@@ -52,7 +54,37 @@ impl ChannelManager {
            recycle_channel_in_loop(cloned_pool, RECYCLE_CHANNEL_INTERVAL_SECS).await;
        });

        Self { config, pool }
        Self {
            config,
            client_tls_config: None,
            pool,
        }
    }

    pub fn with_tls_config(config: ChannelConfig) -> Result<Self> {
        let mut cm = Self::with_config(config.clone());

        // setup tls
        let path_config = config.client_tls.context(InvalidTlsConfigSnafu {
            msg: "no config input",
        })?;

        let server_root_ca_cert = std::fs::read_to_string(path_config.server_ca_cert_path)
            .context(InvalidConfigFilePathSnafu)?;
        let server_root_ca_cert = Certificate::from_pem(server_root_ca_cert);
        let client_cert = std::fs::read_to_string(path_config.client_cert_path)
            .context(InvalidConfigFilePathSnafu)?;
        let client_key = std::fs::read_to_string(path_config.client_key_path)
            .context(InvalidConfigFilePathSnafu)?;
        let client_identity = Identity::from_pem(client_cert, client_key);

        cm.client_tls_config = Some(
            ClientTlsConfig::new()
                .ca_certificate(server_root_ca_cert)
                .identity(client_identity),
        );

        Ok(cm)
    }

    pub fn config(&self) -> &ChannelConfig {
@@ -119,8 +151,7 @@ impl ChannelManager {
    }

    fn build_endpoint(&self, addr: &str) -> Result<Endpoint> {
        let mut endpoint =
            Endpoint::new(format!("http://{addr}")).context(error::CreateChannelSnafu)?;
        let mut endpoint = Endpoint::new(format!("http://{addr}")).context(CreateChannelSnafu)?;

        if let Some(dur) = self.config.timeout {
            endpoint = endpoint.timeout(dur);
@@ -152,6 +183,12 @@ impl ChannelManager {
        if let Some(enabled) = self.config.http2_adaptive_window {
            endpoint = endpoint.http2_adaptive_window(enabled);
        }
        if let Some(tls_config) = &self.client_tls_config {
            endpoint = endpoint
                .tls_config(tls_config.clone())
                .context(CreateChannelSnafu)?;
        }

        endpoint = endpoint
            .tcp_keepalive(self.config.tcp_keepalive)
            .tcp_nodelay(self.config.tcp_nodelay);
@@ -160,6 +197,13 @@ impl ChannelManager {
    }
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ClientTlsOption {
    pub server_ca_cert_path: String,
    pub client_cert_path: String,
    pub client_key_path: String,
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ChannelConfig {
    pub timeout: Option<Duration>,
@@ -174,6 +218,7 @@ pub struct ChannelConfig {
    pub http2_adaptive_window: Option<bool>,
    pub tcp_keepalive: Option<Duration>,
    pub tcp_nodelay: bool,
    pub client_tls: Option<ClientTlsOption>,
}

impl Default for ChannelConfig {
@@ -191,6 +236,7 @@ impl Default for ChannelConfig {
            http2_adaptive_window: None,
            tcp_keepalive: None,
            tcp_nodelay: true,
            client_tls: None,
        }
    }
}
@@ -307,6 +353,16 @@ impl ChannelConfig {
            ..self
        }
    }

    /// Set the value of tls client auth.
    ///
    /// Disabled by default.
    pub fn client_tls_config(self, client_tls_option: ClientTlsOption) -> Self {
        Self {
            client_tls: Some(client_tls_option),
            ..self
        }
    }
}

#[derive(Debug)]
@@ -401,7 +457,11 @@ mod tests {
    async fn test_access_count() {
        let pool = Arc::new(Pool::default());
        let config = ChannelConfig::new();
        let mgr = Arc::new(ChannelManager { pool, config });
        let mgr = Arc::new(ChannelManager {
            pool,
            config,
            client_tls_config: None,
        });
        let addr = "test_uri";

        let mut joins = Vec::with_capacity(10);
@@ -443,6 +503,7 @@ mod tests {
                http2_adaptive_window: None,
                tcp_keepalive: None,
                tcp_nodelay: true,
                client_tls: None,
            },
            default_cfg
        );
@@ -459,7 +520,12 @@ mod tests {
            .http2_keep_alive_while_idle(true)
            .http2_adaptive_window(true)
            .tcp_keepalive(Duration::from_secs(2))
            .tcp_nodelay(false);
            .tcp_nodelay(false)
            .client_tls_config(ClientTlsOption {
                server_ca_cert_path: "some_server_path".to_string(),
                client_cert_path: "some_cert_path".to_string(),
                client_key_path: "some_key_path".to_string(),
            });

        assert_eq!(
            ChannelConfig {
@@ -475,6 +541,11 @@ mod tests {
                http2_adaptive_window: Some(true),
                tcp_keepalive: Some(Duration::from_secs(2)),
                tcp_nodelay: false,
                client_tls: Some(ClientTlsOption {
                    server_ca_cert_path: "some_server_path".to_string(),
                    client_cert_path: "some_cert_path".to_string(),
                    client_key_path: "some_key_path".to_string(),
                }),
            },
            cfg
        );
@@ -496,7 +567,11 @@ mod tests {
            .http2_adaptive_window(true)
            .tcp_keepalive(Duration::from_secs(2))
            .tcp_nodelay(true);
        let mgr = ChannelManager { pool, config };
        let mgr = ChannelManager {
            pool,
            config,
            client_tls_config: None,
        };

        let res = mgr.build_endpoint("test_addr");

@@ -512,7 +587,11 @@ mod tests {
        let pool = Arc::new(pool);

        let config = ChannelConfig::new();
        let mgr = ChannelManager { pool, config };
        let mgr = ChannelManager {
            pool,
            config,
            client_tls_config: None,
        };

        let addr = "test_addr";
        let res = mgr.get(addr);
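Putting the new pieces together, enabling client-side mTLS means attaching a `ClientTlsOption` to the `ChannelConfig` and constructing the manager via `with_tls_config`. A minimal sketch (the certificate paths and address are placeholders, and it assumes the crate's `error` module is public as its `#[snafu(visibility(pub))]` suggests):

    use common_grpc::channel_manager::{ChannelConfig, ChannelManager, ClientTlsOption};

    fn tls_channel() -> common_grpc::error::Result<()> {
        let config = ChannelConfig::new().client_tls_config(ClientTlsOption {
            server_ca_cert_path: "/etc/greptime/tls/server-ca.pem".to_string(),
            client_cert_path: "/etc/greptime/tls/client.pem".to_string(),
            client_key_path: "/etc/greptime/tls/client-key.pem".to_string(),
        });
        // Certificates are read eagerly in with_tls_config, so bad paths fail fast.
        let manager = ChannelManager::with_tls_config(config)?;
        // Channels built from this manager now carry the TLS settings.
        let _channel = manager.get("127.0.0.1:4001")?;
        Ok(())
    }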
@@ -13,6 +13,7 @@
// limitations under the License.

use std::any::Any;
use std::io;

use common_error::prelude::{ErrorExt, StatusCode};
use snafu::{Backtrace, ErrorCompat, Snafu};
@@ -22,6 +23,15 @@ pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    #[snafu(display("Invalid client tls config, {}", msg))]
    InvalidTlsConfig { msg: String },

    #[snafu(display("Invalid config file path, {}", source))]
    InvalidConfigFilePath {
        source: io::Error,
        backtrace: Backtrace,
    },

    #[snafu(display("Missing required field in protobuf, field: {}", field))]
    MissingField { field: String, backtrace: Backtrace },

@@ -81,7 +91,9 @@ pub enum Error {
impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::MissingField { .. }
            Error::InvalidTlsConfig { .. }
            | Error::InvalidConfigFilePath { .. }
            | Error::MissingField { .. }
            | Error::TypeMismatch { .. }
            | Error::InvalidFlightData { .. } => StatusCode::InvalidArguments,
@@ -67,7 +67,7 @@ macro_rules! convert_arrow_array_to_grpc_vals {
                return Ok(vals);
            },
        )+
        ConcreteDataType::Null(_) | ConcreteDataType::List(_) => unreachable!("Should not send {:?} in gRPC", $data_type),
        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => unreachable!("Should not send {:?} in gRPC", $data_type),
        }
    }};
}
@@ -14,7 +14,8 @@

use std::collections::HashMap;

use api::v1::column::{SemanticType, Values};
use api::helper::values_with_capacity;
use api::v1::column::SemanticType;
use api::v1::{Column, ColumnDataType};
use common_base::BitVec;
use snafu::ensure;
@@ -212,7 +213,7 @@ impl LinesWriter {
        batch.0.push(Column {
            column_name: column_name.to_string(),
            semantic_type: semantic_type.into(),
            values: Some(Values::with_capacity(datatype, to_insert)),
            values: Some(values_with_capacity(datatype, to_insert)),
            datatype: datatype as i32,
            null_mask: Vec::default(),
        });
src/common/grpc/tests/mod.rs (new file, 57 lines)
@@ -0,0 +1,57 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_grpc::channel_manager::{ChannelConfig, ChannelManager, ClientTlsOption};

#[tokio::test]
async fn test_mtls_config() {
    // test no config
    let config = ChannelConfig::new();
    let re = ChannelManager::with_tls_config(config);
    assert!(re.is_err());

    // test wrong file
    let config = ChannelConfig::new().client_tls_config(ClientTlsOption {
        server_ca_cert_path: "tests/tls/wrong_server.cert.pem".to_string(),
        client_cert_path: "tests/tls/wrong_client.cert.pem".to_string(),
        client_key_path: "tests/tls/wrong_client.key.pem".to_string(),
    });

    let re = ChannelManager::with_tls_config(config);
    assert!(re.is_err());

    // test corrupted file content
    let config = ChannelConfig::new().client_tls_config(ClientTlsOption {
        server_ca_cert_path: "tests/tls/server.cert.pem".to_string(),
        client_cert_path: "tests/tls/client.cert.pem".to_string(),
        client_key_path: "tests/tls/corrupted".to_string(),
    });

    let re = ChannelManager::with_tls_config(config);
    assert!(re.is_ok());
    let re = re.unwrap().get("127.0.0.1:0");
    assert!(re.is_err());

    // success
    let config = ChannelConfig::new().client_tls_config(ClientTlsOption {
        server_ca_cert_path: "tests/tls/server.cert.pem".to_string(),
        client_cert_path: "tests/tls/client.cert.pem".to_string(),
        client_key_path: "tests/tls/client.key.pem".to_string(),
    });

    let re = ChannelManager::with_tls_config(config);
    assert!(re.is_ok());
    let re = re.unwrap().get("127.0.0.1:0");
    assert!(re.is_ok());
}
src/common/grpc/tests/tls/client.cert.pem (new file, 36 lines)
@@ -0,0 +1,36 @@
-----BEGIN CERTIFICATE-----
MIIGOzCCBCOgAwIBAgIBATANBgkqhkiG9w0BAQsFADCBhzELMAkGA1UEBhMCSU4x
EjAQBgNVBAgMCUthcm5hdGFrYTESMBAGA1UEBwwJQkFOR0FMT1JFMRUwEwYDVQQK
DAxHb0xpbnV4Q2xvdWQxEjAQBgNVBAMMCWNhLXNlcnZlcjElMCMGCSqGSIb3DQEJ
ARYWYWRtaW5AZ29saW51eGNsb3VkLmNvbTAeFw0yMzAyMTQxMTM4MDFaFw0yNzA4
MjIxMTM4MDFaMHIxCzAJBgNVBAYTAklOMRIwEAYDVQQIDAlLYXJuYXRha2ExFTAT
BgNVBAoMDEdvTGludXhDbG91ZDERMA8GA1UEAwwIc2VydmVyLTIxJTAjBgkqhkiG
9w0BCQEWFmFkbWluQGdvbGludXhjbG91ZC5jb20wggIiMA0GCSqGSIb3DQEBAQUA
A4ICDwAwggIKAoICAQDNPiXZFK1cDOevdU5628xqAZjHn2e86hD9ih0IHvQKbcAm
a8fhFMQ+Gki+p2+Ga1fxHDi1+aUn00UjyLAxSMQVulpZWYHsRj3koyD9LyTvpDQk
SwJhFNtL33WlqUMtjgVXoznjECfhc/hwKJ9BS0b5j21XzqYkSKTJNcxZmoNLJVvL
dfbsWjLywSAHbcF1gs2w3IxruPQwyMXL1URjcwGRTtK+zk6QGxgyXsIEJDW4EZqR
xXgmEz7jx7vfDLaYc8GoujTki2dkyTWQkdDrJ4/N7VWGOGjL60EJDOcQyCowDuAq
sbB5C9OuhB59o2/wzeSeaY7qS5nLOufwiYmvc1S6kgi9emirxqFLmrcaJv8QPDEX
6ufI8wSkCS/CX/IUNXPkSripU3zQcjorinAw3w9pGY1VNknz5AgDXrEAW17aZKsp
QyLSyl87vG9dhjybdkc7QyBghTxweggYT1INY6dmj9ijIyU+9V64xOTb9dlbgLW/
qAvZyeq2H9Z5aBwkG31n1b2rX0JEK+/NC+8PRs2tWq63EOB8hzh4mF9RKLcZC3zS
9eJa1B0ugyy5fw8GGWA49H3rFoU2u7+Gazzdn5uD9sqLuVnzW1FREDhMHGd4VdRx
vuhUp9jz9u0WDRr2Ix7N7Vd57mwhBPivUywg7QwZSTqlIrGVoQFPL4BjWwSSswID
AQABo4HFMIHCMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQDAgWgMDMGCWCGSAGG
+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBDbGllbnQgQ2VydGlmaWNhdGUwHQYD
VR0OBBYEFI056bMc2jHoeOTUGBCpBGGY/UfQMB8GA1UdIwQYMBaAFKVZwpSJCPkN
wGXyJX1sl2Pbby4FMA4GA1UdDwEB/wQEAwIF4DAdBgNVHSUEFjAUBggrBgEFBQcD
AgYIKwYBBQUHAwQwDQYJKoZIhvcNAQELBQADggIBABHQ/EGnAFeIdzKTbaP3kaSd
A3tCyjWVwo9eULXBjsMFFyf4NDw8bkrYdJos6rBpzi6R1PUb4UMc9CUF6ee9zbTK
mDeusqwhDOLmYZot1aZbujMngpbMoQx5keSQ9Eg10npbYMl6Sq3qFbAST9l/hlDh
Ue9KhfrAvrSobP0WWb/EpEXZMt2DafKpoz4nvtFpcOO5kbsQ+/eQfWHmR/k6sCYG
UycFYCJCFQz2xG8wtbExg5iyaR3nE0LfqZwRxhIa4iSWlCecYc1XUJnOh8fIeop4
9fD5k2wqvCEBAZiaKg2RYbaw6LIFkg7c99B4Gt5eez7Bs878T7lS+xl9wbzinzez
WFIgsDYHYjmK8s5WXXWwT7UhqSA12FHOp8grqFllXV/dOPTFz+dq9Mn1VGgH6MS4
Ls3r2LH5ycAz+gkoY2wlnF++ItpB2K3LTlqk+OvQZ1oXMq8u5F6XsM7Uirc7Da+9
MEG1zBpGvA/iAd2kKd3APS+EuoytSt022bD7YDJ1isuxT5q2Hpa4p14BJHCgDKTZ
vPYIdzCh05vwLwB28T8bh7s5OLOcRY9KmxVPkT0SYLOk11j5nZ1N/hQvGDxL60e2
RBS3ADHkymIE55Xf1VLXcs17zR9fLV+5fiSQ40FLjcBEjhkvrzcDe3tVFsA/ty9h
dBCSsexiXj/S5KwKtz/c
-----END CERTIFICATE-----
src/common/grpc/tests/tls/client.key.pem (new file, 51 lines)
@@ -0,0 +1,51 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJKQIBAAKCAgEAzT4l2RStXAznr3VOetvMagGYx59nvOoQ/YodCB70Cm3AJmvH
4RTEPhpIvqdvhmtX8Rw4tfmlJ9NFI8iwMUjEFbpaWVmB7EY95KMg/S8k76Q0JEsC
YRTbS991palDLY4FV6M54xAn4XP4cCifQUtG+Y9tV86mJEikyTXMWZqDSyVby3X2
7Foy8sEgB23BdYLNsNyMa7j0MMjFy9VEY3MBkU7Svs5OkBsYMl7CBCQ1uBGakcV4
JhM+48e73wy2mHPBqLo05ItnZMk1kJHQ6yePze1Vhjhoy+tBCQznEMgqMA7gKrGw
eQvTroQefaNv8M3knmmO6kuZyzrn8ImJr3NUupIIvXpoq8ahS5q3Gib/EDwxF+rn
yPMEpAkvwl/yFDVz5Eq4qVN80HI6K4pwMN8PaRmNVTZJ8+QIA16xAFte2mSrKUMi
0spfO7xvXYY8m3ZHO0MgYIU8cHoIGE9SDWOnZo/YoyMlPvVeuMTk2/XZW4C1v6gL
2cnqth/WeWgcJBt9Z9W9q19CRCvvzQvvD0bNrVqutxDgfIc4eJhfUSi3GQt80vXi
WtQdLoMsuX8PBhlgOPR96xaFNru/hms83Z+bg/bKi7lZ81tRURA4TBxneFXUcb7o
VKfY8/btFg0a9iMeze1Xee5sIQT4r1MsIO0MGUk6pSKxlaEBTy+AY1sEkrMCAwEA
AQKCAgEAw2jBZj5+k96hk/dPIkA1DlS43o7RmRcN2CdwXrQBzBAUW0BRDObVtP8X
dZY647M+BozFHdUzPoizEk/YGQRb1QgZT2qd/ZQfB5mdJhGFzDf9gPR9rmrKJCH8
hB50nGHUik0ZJyvRnKDqz/aNMgB28dJx26Efo/oaEoyLJGCtUpWeIUgOMZfrXB8t
3ITOJZDFP/esJj/xFqWBVQGXXEw6GNwAYLRSLnftgL+hX4oOL1NrZBCrxSybuwkG
wWX8T4gewQOQqmxjo5zCyANc8xc2nmyx+dmpRUWWJQTI1ryNFjaDjYKiL41oHIcj
9KDwSkftvDlqXX5fThSmkeiRU5+t8UMj4+Bt7opCzIlwHtQe+95BqiXQ7bHfCjn7
GvShZgHo45rDkfwWDz/pYhHQ2Wb9DkhEtwa0cu3mDMGc6BY+4yo+Vz6Rk1TypxQw
LIa43WgVCRm66Mq65sObx7wkdxvolUE8j1Io3AHwgeBjV+gISV9srj2m/HnOmFFb
16SKQEDEVoaci+v6DT8A7UOZH4sgYSbknHdjMy6c6UlYgd8UNqbY3h/ohZ2JOcPd
8DqGUDGKbpS7OxWogxb9K++6SPSn86sPmUjzRPMgijVjU5pyK42DpZj1/RIe8Tml
JXVqHuZvURK4Qi3ECQ09m9vQ9nS88HMRVJ7sFSca6HOFYSFyAfkCggEBAPN35hva
OhbgQlFJrpo5YDYS5v7l7YjLbry6DaCR1CpYaKlTPkc4tiznCUHe1N41mR4qu2Tc
4+m7GN9BZfLU8w/Jvrp7mAO7fZXZtIzTQrZQDbAZppUGBbGBoAOlLVxR4NrN2TSk
49Ljj87UynhxeCv6RWx0F1p1/VIZertLELbSdb3C43pAsNSXzbkb7LtT9RXemyUL
LBK4ugcXMSZrzHJK1Ct31LoGd9m+TEp/VW2aGMeWliuIticJx44OW4tlJ70qKrd0
KezBZVMHPa3FqW7kdYwdlISoqZsE9OVPgLCQNVLhDO1YMaTl3WEHKTRBxTF70pvv
zMkSRQGoU4ff7AUCggEBANfOkCsx2mRvJV+UYxW8R6510w2H1bNbNRbfTJAo8kld
/7dXU4H3QrhUrCsSyc5ijm09q7I4+rc+uMxfT/R1mO5tq9AueCWhg85WV+NBR1FE
Yg7MX+zblpHqUDQoTj9vvgwLyqvZ7k9NON42Zz+Tj2worICnVlDvahm/3NaItT9B
oGhsEoJjYFK4Hq7RwosU+KPXkQBxWzrNLipo8jx0XFpPZVHSLIFs9eW25bnj/qxc
toMgx4IsvEDlzS/oqfycCrDdKwqiW74w0Djb5TiJv+dYzl9GnN6istqbUTNZkJjn
lkbmegrtfz3Yd1ORvjkNqHuANyuR+YnUSIsb0PV5eVcCggEAck5bgb4eQbk+SY3P
ZOcFLb4IJ6ppsCzaq86qMTXmJ49kbAMCHUwZ89DwvrVQuZbucYRcgMlYU9ccoUzC
AZVLHKF6Y3E9eJshJiaVJvzUuGWzV3djh1nReHpEVxHIzyw95lx42seDkvJ2BQRQ
nuWfJv6Uc4u5nyYALfh6b86ZZUxALTx/slkG7HjtBDiBF54eVgsySd0J7yw9YrDX
yZMY5JwPKu1SuZfp0xgOF3fa8t9DPQmNLZk88+0afK5u+m4ejyhp78GhIV/XI3kl
0x0XJFIsggEtRm8tWfOkyrhd0geSkXvJpvEeNa4aFsDW7ormewoIYl/ehJSIQ3P0
67kMxQKCAQA12iP7w2r+GQY4fazkJaG1lU1fWQAoy5/J31sZtj4PtNc1ByOdkPgj
S23TKdMWH13vQK5xwOo/g/VVeotXM2lARjnTr2Tn7xAXE1DHMuj7DJdznehqELnY
G6J8AXrVNas1ElQ24iEnxNtmCClnogjuMpApYpiVhcjyOACBwIeKC3Rd2mocA3Rr
7+ooMcvcLRWGvSo/9AmR+NWGW73m/Bp3psxfyJS2j1wlQKi+5HgOxuv8eNeQUl1/
zFiRlfulP8MjM22kL7O5GDE9nxHqM+Whc3W8LMDEhdEf4BY5PCZrIY9MjgLyayWP
Z08PmZTgY9ohR3N8+eZNUJ3xqLVSLEftAoIBAQDF1K8lPXAs8e4V0oc9hq4GFLvi
E0KC+8X1ShzvkVGV/3Kz1FJ0bwix/M3C5XSSNguxHI6CG2GprJlExp1qqwlvmGr2
hHdfemvq6tF4qjXLgPXvgoWocBGNUvBXxFVuc0hOHgT/X3+GsPYtNvZb3fp+4Bm6
ugUu05drqrHSOY5kUbU3jf/5KctnDFmOsSeOgGiI/JJWVcKJALpDkhazRL0nxfuW
6xU6pZazhCAby2Qn+wn0xyi4bEZSNobiQTgOXOC0DA1uGD3XHctCMnSBtYtocQjq
IFT2l3u4pEKpVQwuc4+yObWUT47oBxV6vFneXsnV89vd2SSUPuR8GIYYeA+/
-----END RSA PRIVATE KEY-----
src/common/grpc/tests/tls/corrupted (new file, 50 lines; deliberately invalid key material for the corrupted-content test)
@@ -0,0 +1,50 @@
rWtZ7U3SoVAl6yMhfJsB
LcEGbuCfgFxk2ADw0N1G
byTKlrUgoRZeSc0cYHTf
0XjbRCBtMV9yYaVJKPwi
rGofQgFoc1lW0U5x2bnN
O9nn9aDe5t5LAlGS81uX
aBMvuzVjHbZKOlabXl4W
ZJc06qngAcQWQUu8nAnR
FLsjhoaTyuaDMY3OWJAx
5Dt7YglND5uFAqYwRG9L
agLGOCH8suwnXGYaPxjM
Ysb5RANkpgcbSulLZiic
4sLmpJomjokwZbctODVW
pCLiQT3wWDJ7YjIePR6g
P3Jlg0LDhbgSwXxgjjUR
6qGRfcb8LFlVlT7O1ze2
lFBNWzijkPeKyKmwpOSa
oGCR2OUg71n0Tzt2a3ir
WLijq0bL1Cetz24fv738
L3MEAwezFBW38U4QilNz
uza1bC3PgToermGSgKLx
WMdgjZIszK4t6Rehelx8
YpCJWVXTob3Gn4bMwWJO
xpJ9qhvMBdD8iamheF4b
bUm1YmHW4gPT1ujiqCmN
I7hOFurjJ6zvXGETyfCn
w23W8PNFWbqpHUKN59Bz
HpbsIRDVVpEGxnoWmdjq
58BUOxDdbTZxCKt0UqLD
uUPOlW8bRhuC1tK1NL5u
wq9ybcfwZ4jIHyYlHZ5M
4t4zKLRG2DN6icHmctOW
TzYp3np0OFsTlzCwkogM
Os6SOvjU0Irq2Xo5wLvn
1nN6FQwUxcw0H5rfQEZo
NioHP0JdBv3HmIaQZs1n
8lJWLVof1TBWtRUKmWmO
79DcTURdzt28Vdn6F0K0
UiG15bda4Pb81I9IE9ug
iZkC7CE98aE6WQK9Ghlu
dNXJTkUD3uVg6Tqi3957
Hfa9xMclyrxsOvkGcudI
QbcvG5Apom6nBWIGHRMQ
68rn9eZEcq5mJLaiNmHr
5AOtHddC5NVgQLgdmmKb
gQlrcSXzxT6V6jzbxZ79
xmulvmkeqG4kj6TAuJEg
u9dCkExxv5tLSpF8hC08
HHU4QE56UC97djO5EpmK
g3rElyboRHlAYPWviWbm
src/common/grpc/tests/tls/server.cert.pem (new file, 40 lines)
@@ -0,0 +1,40 @@
-----BEGIN CERTIFICATE-----
MIIG+jCCBOKgAwIBAgIBAjANBgkqhkiG9w0BAQsFADCBhzELMAkGA1UEBhMCSU4x
EjAQBgNVBAgMCUthcm5hdGFrYTESMBAGA1UEBwwJQkFOR0FMT1JFMRUwEwYDVQQK
DAxHb0xpbnV4Q2xvdWQxEjAQBgNVBAMMCWNhLXNlcnZlcjElMCMGCSqGSIb3DQEJ
ARYWYWRtaW5AZ29saW51eGNsb3VkLmNvbTAeFw0yMzAyMTQxMTM5NDBaFw0yNzA4
MjIxMTM5NDBaMHAxCzAJBgNVBAYTAklOMRIwEAYDVQQIDAlLYXJuYXRha2ExFTAT
BgNVBAoMDEdvTGludXhDbG91ZDEPMA0GA1UEAwwGc2VydmVyMSUwIwYJKoZIhvcN
AQkBFhZhZG1pbkBnb2xpbnV4Y2xvdWQuY29tMIICIjANBgkqhkiG9w0BAQEFAAOC
Ag8AMIICCgKCAgEAvVtxAoRjLRs3Ei4+CgzqJ2+bpc0sBdUm/4LM/D+0KbXxwD7w
HP6GcKl/9zf9GJg56pVXxXMaerMDLS4Est25+mBgqcePC6utCBYrKA25pKbkFkxZ
TPh9/R4RHGVJ3KHy9vc4VzqoV7XFMJFFUQ2fQywHZlXh6MNz0WPTIGaH7hvYoHbK
I3NpPq8TjRuuV61XB0hK+RW0K6/5Yuj74h/mfheX1VIUOjGwKnTPccZQAlrKYjeW
BZBS4YqahkTIaGLa06SdUSkuhL85rqAxWvhK9GIRlQLNYJOzg+E3jGyqf566xX60
fxM6alLYf+ZzCwSBuDDj5f+j752gPLYUI82YL4xQ+AEHNR8U1uMvt0EzzFt7mSRe
fobVr+Y2zpci+mo7kcQGOhenzGclsm+qXwMhYUnJcOYFZWtTJlFaaPreL4M3Dh+2
pmKj23ZU6zcT3MYtE6phjCLJl0DsFIcOn+tSqMdpwB20EeQjo9bVJuw/HJrlpcnY
U9aLsnm/4Ls5A0BQutZnxKBIJjpzp8VfK0WU8a4iKok3AS0z1/K+atNrgSUB9DCH
0MvLqqQmM9TdLcZj7NSEfLyyFVwPRc5dt4CrNDL7JUpMzt36ezU83JU+nfqWDZsL
+2JOaE4gGLZDcA3cfP83/mYRaAnYW/9W4vEnIpa6subzq1aFOeY/3dKLTx8CAwEA
AaOCAYUwggGBMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQDAgZAMDMGCWCGSAGG
+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYD
VR0OBBYEFLijeA+RFDQtuVeMUkaXqF7LF50GMIG8BgNVHSMEgbQwgbGAFKVZwpSJ
CPkNwGXyJX1sl2Pbby4FoYGNpIGKMIGHMQswCQYDVQQGEwJJTjESMBAGA1UECAwJ
S2FybmF0YWthMRIwEAYDVQQHDAlCQU5HQUxPUkUxFTATBgNVBAoMDEdvTGludXhD
bG91ZDESMBAGA1UEAwwJY2Etc2VydmVyMSUwIwYJKoZIhvcNAQkBFhZhZG1pbkBn
b2xpbnV4Y2xvdWQuY29tggkA7NvbvF8jodEwDgYDVR0PAQH/BAQDAgWgMBMGA1Ud
JQQMMAoGCCsGAQUFBwMBMCkGA1UdEQQiMCCHBMCoAHKHBAoAAg+CEnNlcnZlci5l
eGFtcGxlLmNvbTANBgkqhkiG9w0BAQsFAAOCAgEAXvaS9+y5g2Kw/4EPsnhjpN1v
CxXW0+UYSWOaxVJdEAjGQI/1m9LOiF9IHImmiwluJ/Bex1TzuaTCKmpluPwGvd9D
Zgf0A5SmVqW4WTT4d2nSecxw4OICJ3j6ubKkvMVf9s+ZJwb+fMMUaSt80bWqp1TY
XbZguv67PkBECPqVe6rgzXnTLwM3lE8EgG8VtM3IOy9a5SIEjm5L8SQ2I2hiytmE
e4jR1fbZsB5NbBdfA3GFMKQEE2dIymkG3Bz71M3tZi1y4RnHtRKdrFtrIlgclrwd
nVnQn/NiXUOOzsL2+vwSF32SSbiLvOxu63qO1YDBkKVChog3P/2f6xcJ23wkbHlL
qaL2jvLo6ylvMPUYHf5ZWat5zayaGUMHYDKcbD4Dw7aY3M0tNgEHdqUqNePmKvmn
luyXof3KmmLgWlcfBoX96a7hXDtxFyB2N4nzfQBXh+0VAlgqa+ZZhpdEqRQaWkkR
MDBdsVJ9O3812IaNfMzpS1vb701GFDCM5Hcyw6a/v6Ln08NMhYut4saLi13kHilS
Wq7wOAfW3rzxuhjOJJxsi0jJNI775q+a/BbbG/CPl826bXPGH43BdPV8mKwsX5HM
wwDKf3otP/v7bxwJabfhv2EKUy+W1kkFW9FEZ919yTtfhSDrTNcrXtE7RkiAepfm
95I025URIlhJGLGBUlA=
-----END CERTIFICATE-----
@@ -13,9 +13,11 @@ futures.workspace = true
object-store = { path = "../../object-store" }
serde.workspace = true
serde_json = "1.0"
smallvec = "1"
snafu.workspace = true
tokio.workspace = true
uuid.workspace = true

[dev-dependencies]
futures-util.workspace = true
tempdir = "0.3"
@@ -69,6 +69,18 @@ pub enum Error {
        key: String,
        source: object_store::Error,
    },

    #[snafu(display("Failed to deserialize from json, source: {}", source))]
    FromJson {
        source: serde_json::Error,
        backtrace: Backtrace,
    },

    #[snafu(display("Procedure exec failed, source: {}", source))]
    RetryLater {
        #[snafu(backtrace)]
        source: BoxedError,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -81,7 +93,9 @@ impl ErrorExt for Error {
            | Error::PutState { .. }
            | Error::DeleteState { .. }
            | Error::ListState { .. }
            | Error::ReadState { .. } => StatusCode::Internal,
            | Error::ReadState { .. }
            | Error::FromJson { .. }
            | Error::RetryLater { .. } => StatusCode::Internal,
            Error::LoaderConflict { .. } | Error::DuplicateProcedure { .. } => {
                StatusCode::InvalidArguments
            }
@@ -104,4 +118,26 @@ impl Error {
            source: BoxedError::new(err),
        }
    }

    /// Creates a new [Error::RetryLater] error from source `err`.
    pub fn retry_later<E: ErrorExt + Send + Sync + 'static>(err: E) -> Error {
        Error::RetryLater {
            source: BoxedError::new(err),
        }
    }

    /// Returns true if this error is the retry-later type.
    pub fn is_retry_later(&self) -> bool {
        matches!(self, Error::RetryLater { .. })
    }

    /// Creates a new [Error::RetryLater] or [Error::External] error from source `err` according
    /// to its [StatusCode].
    pub fn from_error_ext<E: ErrorExt + Send + Sync + 'static>(err: E) -> Self {
        if err.status_code().is_retryable() {
            Error::retry_later(err)
        } else {
            Error::external(err)
        }
    }
}
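`from_error_ext` is where the `StatusCode::is_retryable` change earlier in this diff pays off: retryable status codes become `RetryLater`, everything else becomes `External`. A minimal sketch (it assumes `MockError::new` takes a `StatusCode`, as the imports in the tests further below suggest):

    use common_error::mock::MockError;
    use common_error::prelude::StatusCode;

    // A transient failure maps to RetryLater...
    let transient = Error::from_error_ext(MockError::new(StatusCode::StorageUnavailable));
    assert!(transient.is_retry_later());

    // ...while a permanent one maps to External.
    let permanent = Error::from_error_ext(MockError::new(StatusCode::InvalidArguments));
    assert!(!permanent.is_retry_later());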
@@ -15,15 +15,12 @@
//! Common traits and structures for the procedure framework.

pub mod error;
#[allow(dead_code)]
mod local;
pub mod local;
mod procedure;
// TODO(yingwen): Remove this attribute once ProcedureManager is implemented.
#[allow(dead_code)]
mod store;

pub use crate::error::{Error, Result};
pub use crate::procedure::{
    BoxedProcedure, Context, LockKey, Procedure, ProcedureId, ProcedureManager,
    ProcedureManagerRef, ProcedureState, ProcedureWithId, Status,
    BoxedProcedure, Context, ContextProvider, LockKey, Procedure, ProcedureId, ProcedureManager,
    ProcedureManagerRef, ProcedureState, ProcedureWithId, Status, Watcher,
};
@@ -13,19 +13,27 @@
// limitations under the License.

mod lock;
mod runner;

use std::sync::{Arc, Mutex};
use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, Mutex, RwLock};

use async_trait::async_trait;
use common_telemetry::logging;
use object_store::ObjectStore;
use snafu::ensure;
use tokio::sync::watch::{self, Receiver, Sender};
use tokio::sync::Notify;

use crate::{LockKey, ProcedureId, ProcedureState};

/// Mutable metadata of a procedure during execution.
#[derive(Debug)]
struct ExecMeta {
    /// Current procedure state.
    state: ProcedureState,
}
use crate::error::{DuplicateProcedureSnafu, LoaderConflictSnafu, Result};
use crate::local::lock::LockMap;
use crate::local::runner::Runner;
use crate::procedure::BoxedProcedureLoader;
use crate::store::{ObjectStateStore, ProcedureMessage, ProcedureStore, StateStoreRef};
use crate::{
    BoxedProcedure, ContextProvider, LockKey, ProcedureId, ProcedureManager, ProcedureState,
    ProcedureWithId, Watcher,
};

/// Shared metadata of a procedure.
///
@@ -36,7 +44,7 @@ struct ExecMeta {
/// for children;
/// 2. always use `notify_one` and ensure there is only one waiter.
#[derive(Debug)]
struct ProcedureMeta {
pub(crate) struct ProcedureMeta {
    /// Id of this procedure.
    id: ProcedureId,
    /// Notify to wait for a lock.
@@ -45,70 +53,655 @@ struct ProcedureMeta {
    parent_id: Option<ProcedureId>,
    /// Notify to wait for subprocedures.
    child_notify: Notify,
    /// Locks inherited from the parent procedure.
    parent_locks: Vec<LockKey>,
    /// Lock not in `parent_locks` but required by this procedure.
    ///
    /// If the parent procedure already owns the lock that this procedure
    /// needs, we set this field to `None`.
    lock_key: Option<LockKey>,
    /// Mutable status during execution.
    exec_meta: Mutex<ExecMeta>,
    /// Lock required by this procedure.
    lock_key: LockKey,
    /// Sender to notify the procedure state.
    state_sender: Sender<ProcedureState>,
    /// Receiver to watch the procedure state.
    state_receiver: Receiver<ProcedureState>,
    /// Id of child procedures.
    children: Mutex<Vec<ProcedureId>>,
}

impl ProcedureMeta {
    /// Return all locks the procedure needs.
    fn locks_needed(&self) -> Vec<LockKey> {
        let num_locks = self.parent_locks.len() + if self.lock_key.is_some() { 1 } else { 0 };
        let mut locks = Vec::with_capacity(num_locks);
        locks.extend_from_slice(&self.parent_locks);
        if let Some(key) = &self.lock_key {
            locks.push(key.clone());
    fn new(id: ProcedureId, parent_id: Option<ProcedureId>, lock_key: LockKey) -> ProcedureMeta {
        let (state_sender, state_receiver) = watch::channel(ProcedureState::Running);
        ProcedureMeta {
            id,
            lock_notify: Notify::new(),
            parent_id,
            child_notify: Notify::new(),
            lock_key,
            state_sender,
            state_receiver,
            children: Mutex::new(Vec::new()),
        }
    }

        locks
    /// Returns current [ProcedureState].
    fn state(&self) -> ProcedureState {
        self.state_receiver.borrow().clone()
    }

    /// Update current [ProcedureState].
    fn set_state(&self, state: ProcedureState) {
        // Safety: ProcedureMeta also holds the receiver, so `send()` should never fail.
        self.state_sender.send(state).unwrap();
    }

    /// Push `procedure_id` of the subprocedure to the metadata.
    fn push_child(&self, procedure_id: ProcedureId) {
        let mut children = self.children.lock().unwrap();
        children.push(procedure_id);
    }

    /// Append subprocedures to given `buffer`.
    fn list_children(&self, buffer: &mut Vec<ProcedureId>) {
        let children = self.children.lock().unwrap();
        buffer.extend_from_slice(&children);
    }

    /// Returns the number of subprocedures.
    fn num_children(&self) -> usize {
        self.children.lock().unwrap().len()
    }
}

/// Reference counted pointer to [ProcedureMeta].
type ProcedureMetaRef = Arc<ProcedureMeta>;

/// Procedure loaded from store.
struct LoadedProcedure {
    procedure: BoxedProcedure,
    parent_id: Option<ProcedureId>,
    step: u32,
}

/// Shared context of the manager.
pub(crate) struct ManagerContext {
    /// Procedure loaders. The key is the type name of the procedure which the loader returns.
    loaders: Mutex<HashMap<String, BoxedProcedureLoader>>,
    lock_map: LockMap,
    procedures: RwLock<HashMap<ProcedureId, ProcedureMetaRef>>,
    /// Messages loaded from the procedure store.
    messages: Mutex<HashMap<ProcedureId, ProcedureMessage>>,
}

#[async_trait]
impl ContextProvider for ManagerContext {
    async fn procedure_state(&self, procedure_id: ProcedureId) -> Result<Option<ProcedureState>> {
        Ok(self.state(procedure_id))
    }
}

impl ManagerContext {
    /// Returns a new [ManagerContext].
    fn new() -> ManagerContext {
        ManagerContext {
            loaders: Mutex::new(HashMap::new()),
            lock_map: LockMap::new(),
            procedures: RwLock::new(HashMap::new()),
            messages: Mutex::new(HashMap::new()),
        }
    }

    /// Returns true if the procedure with specific `procedure_id` exists.
    fn contains_procedure(&self, procedure_id: ProcedureId) -> bool {
        let procedures = self.procedures.read().unwrap();
        procedures.contains_key(&procedure_id)
    }

    /// Try to insert the `procedure` to the context if there is no procedure
    /// with same [ProcedureId].
    ///
    /// Returns `false` if there is already a procedure using the same [ProcedureId].
    fn try_insert_procedure(&self, meta: ProcedureMetaRef) -> bool {
        let mut procedures = self.procedures.write().unwrap();
        if procedures.contains_key(&meta.id) {
            return false;
        }

        let old = procedures.insert(meta.id, meta);
        debug_assert!(old.is_none());

        true
    }

    /// Returns the [ProcedureState] of specific `procedure_id`.
    fn state(&self, procedure_id: ProcedureId) -> Option<ProcedureState> {
        let procedures = self.procedures.read().unwrap();
        procedures.get(&procedure_id).map(|meta| meta.state())
    }

    /// Returns the [Watcher] of specific `procedure_id`.
    fn watcher(&self, procedure_id: ProcedureId) -> Option<Watcher> {
        let procedures = self.procedures.read().unwrap();
        procedures
            .get(&procedure_id)
            .map(|meta| meta.state_receiver.clone())
    }

    /// Notify a suspended parent procedure with specific `procedure_id` by its subprocedure.
    fn notify_by_subprocedure(&self, procedure_id: ProcedureId) {
        let procedures = self.procedures.read().unwrap();
        if let Some(meta) = procedures.get(&procedure_id) {
            meta.child_notify.notify_one();
        }
    }

    /// Load procedure with specific `procedure_id` from cached [ProcedureMessage]s.
    fn load_one_procedure(&self, procedure_id: ProcedureId) -> Option<LoadedProcedure> {
        let message = {
            let messages = self.messages.lock().unwrap();
            messages.get(&procedure_id).cloned()?
        };

        self.load_one_procedure_from_message(procedure_id, &message)
    }

    /// Load procedure from specific [ProcedureMessage].
    fn load_one_procedure_from_message(
        &self,
        procedure_id: ProcedureId,
        message: &ProcedureMessage,
    ) -> Option<LoadedProcedure> {
        let loaders = self.loaders.lock().unwrap();
        let loader = loaders.get(&message.type_name).or_else(|| {
            logging::error!(
                "Loader not found, procedure_id: {}, type_name: {}",
                procedure_id,
                message.type_name
            );
            None
        })?;

        let procedure = loader(&message.data)
            .map_err(|e| {
                logging::error!(
                    "Failed to load procedure data, key: {}, source: {}",
                    procedure_id,
                    e
                );
                e
            })
            .ok()?;

        Some(LoadedProcedure {
            procedure,
            parent_id: message.parent_id,
            step: message.step,
        })
    }

    /// Returns all procedures in the tree (including given `root` procedure).
    ///
    /// If callers need a consistent view of the tree, they must ensure no new
    /// procedure is added to the tree while this method is running.
    fn procedures_in_tree(&self, root: &ProcedureMetaRef) -> Vec<ProcedureId> {
        let sub_num = root.num_children();
        // Reserve capacity for the root procedure and its children.
        let mut procedures = Vec::with_capacity(1 + sub_num);

        let mut queue = VecDeque::with_capacity(1 + sub_num);
        // Push the root procedure to the queue.
        queue.push_back(root.clone());

        let mut children_ids = Vec::with_capacity(sub_num);
        let mut children = Vec::with_capacity(sub_num);
        while let Some(meta) = queue.pop_front() {
            procedures.push(meta.id);

            // Find metadatas of children.
            children_ids.clear();
            meta.list_children(&mut children_ids);
            self.find_procedures(&children_ids, &mut children);

            // Traverse children later.
            for child in children.drain(..) {
                queue.push_back(child);
            }
        }

        procedures
    }

    /// Finds procedures by given `procedure_ids`.
    ///
    /// Ignores the id if corresponding procedure is not found.
    fn find_procedures(&self, procedure_ids: &[ProcedureId], metas: &mut Vec<ProcedureMetaRef>) {
        let procedures = self.procedures.read().unwrap();
        for procedure_id in procedure_ids {
            if let Some(meta) = procedures.get(procedure_id) {
                metas.push(meta.clone());
            }
        }
    }

    /// Remove cached [ProcedureMessage] by ids.
    fn remove_messages(&self, procedure_ids: &[ProcedureId]) {
        let mut messages = self.messages.lock().unwrap();
        for procedure_id in procedure_ids {
            messages.remove(procedure_id);
        }
    }
}

/// Config for [LocalManager].
#[derive(Debug)]
pub struct ManagerConfig {
    /// Object store
    pub object_store: ObjectStore,
}

/// A [ProcedureManager] that maintains procedure states locally.
pub struct LocalManager {
    manager_ctx: Arc<ManagerContext>,
    state_store: StateStoreRef,
}

impl LocalManager {
    /// Create a new [LocalManager] with specific `config`.
    pub fn new(config: ManagerConfig) -> LocalManager {
        LocalManager {
            manager_ctx: Arc::new(ManagerContext::new()),
            state_store: Arc::new(ObjectStateStore::new(config.object_store)),
        }
    }

    /// Submit a root procedure with given `procedure_id`.
    fn submit_root(
        &self,
        procedure_id: ProcedureId,
        step: u32,
        procedure: BoxedProcedure,
    ) -> Result<Watcher> {
        let meta = Arc::new(ProcedureMeta::new(procedure_id, None, procedure.lock_key()));
        let runner = Runner {
            meta: meta.clone(),
            procedure,
            manager_ctx: self.manager_ctx.clone(),
            step,
            store: ProcedureStore::new(self.state_store.clone()),
        };

        let watcher = meta.state_receiver.clone();

        // Insert the meta into the manager before actually spawning the runner.
        ensure!(
            self.manager_ctx.try_insert_procedure(meta),
            DuplicateProcedureSnafu { procedure_id },
        );

        common_runtime::spawn_bg(async move {
            // Run the root procedure.
            let _ = runner.run().await;
        });

        Ok(watcher)
    }
}
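The `Watcher` handed back by `submit_root` is simply the `tokio::sync::watch` receiver created in `ProcedureMeta::new`, so a caller can await state changes instead of polling. A minimal sketch (only the `Running` and `Done` variants of `ProcedureState` appear in this diff, so anything that is not `Running` just ends the wait):

    // Sketch: wait until the watched procedure leaves the Running state.
    async fn wait_until_settled(mut watcher: Watcher) {
        while matches!(*watcher.borrow(), ProcedureState::Running) {
            // changed() resolves when set_state() sends a new value; an Err
            // means the ProcedureMeta (and its sender) was dropped.
            if watcher.changed().await.is_err() {
                break;
            }
        }
    }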
#[async_trait]
impl ProcedureManager for LocalManager {
    fn register_loader(&self, name: &str, loader: BoxedProcedureLoader) -> Result<()> {
        let mut loaders = self.manager_ctx.loaders.lock().unwrap();
        ensure!(!loaders.contains_key(name), LoaderConflictSnafu { name });

        loaders.insert(name.to_string(), loader);

        Ok(())
    }

    async fn submit(&self, procedure: ProcedureWithId) -> Result<Watcher> {
        let procedure_id = procedure.id;
        ensure!(
            !self.manager_ctx.contains_procedure(procedure_id),
            DuplicateProcedureSnafu { procedure_id }
        );

        self.submit_root(procedure.id, 0, procedure.procedure)
    }

    async fn recover(&self) -> Result<()> {
        logging::info!("LocalManager start to recover");

        let procedure_store = ProcedureStore::new(self.state_store.clone());
        let messages = procedure_store.load_messages().await?;

        for (procedure_id, message) in &messages {
            if message.parent_id.is_none() {
                // This is the root procedure. We only submit the root procedure as it will
                // submit sub-procedures to the manager.
                let Some(loaded_procedure) = self.manager_ctx.load_one_procedure_from_message(*procedure_id, message) else {
                    // Try to load other procedures.
                    continue;
                };

                logging::info!(
                    "Recover root procedure {}-{}, step: {}",
                    loaded_procedure.procedure.type_name(),
                    procedure_id,
                    loaded_procedure.step
                );

                if let Err(e) = self.submit_root(
                    *procedure_id,
                    loaded_procedure.step,
                    loaded_procedure.procedure,
                ) {
                    logging::error!(e; "Failed to recover procedure {}", procedure_id);
                }
            }
        }

        Ok(())
    }

    async fn procedure_state(&self, procedure_id: ProcedureId) -> Result<Option<ProcedureState>> {
        Ok(self.manager_ctx.state(procedure_id))
    }

    fn procedure_watcher(&self, procedure_id: ProcedureId) -> Option<Watcher> {
        self.manager_ctx.watcher(procedure_id)
    }
}
/// Create a new [ProcedureMeta] for test purpose.
|
||||
#[cfg(test)]
|
||||
fn procedure_meta_for_test() -> ProcedureMeta {
|
||||
ProcedureMeta {
|
||||
id: ProcedureId::random(),
|
||||
lock_notify: Notify::new(),
|
||||
parent_id: None,
|
||||
child_notify: Notify::new(),
|
||||
parent_locks: Vec::new(),
|
||||
lock_key: None,
|
||||
exec_meta: Mutex::new(ExecMeta {
|
||||
state: ProcedureState::Running,
|
||||
}),
|
||||
mod test_util {
|
||||
use object_store::services::Fs as Builder;
|
||||
use object_store::ObjectStoreBuilder;
|
||||
use tempdir::TempDir;
|
||||
|
||||
use super::*;
|
||||
|
||||
pub(crate) fn procedure_meta_for_test() -> ProcedureMeta {
|
||||
ProcedureMeta::new(ProcedureId::random(), None, LockKey::default())
|
||||
}
|
||||
|
||||
pub(crate) fn new_object_store(dir: &TempDir) -> ObjectStore {
|
||||
let store_dir = dir.path().to_str().unwrap();
|
||||
let accessor = Builder::default().root(store_dir).build().unwrap();
|
||||
ObjectStore::new(accessor).finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_error::mock::MockError;
|
||||
use common_error::prelude::StatusCode;
|
||||
use tempdir::TempDir;
|
||||
|
||||
use super::*;
|
||||
use crate::error::Error;
|
||||
use crate::{Context, Procedure, Status};
|
||||
|
||||
#[test]
|
||||
fn test_locks_needed() {
|
||||
let mut meta = procedure_meta_for_test();
|
||||
let locks = meta.locks_needed();
|
||||
assert!(locks.is_empty());
|
||||
fn test_manager_context() {
|
||||
let ctx = ManagerContext::new();
|
||||
let meta = Arc::new(test_util::procedure_meta_for_test());
|
||||
|
||||
let parent_locks = vec![LockKey::new("a"), LockKey::new("b")];
|
||||
meta.parent_locks = parent_locks.clone();
|
||||
let locks = meta.locks_needed();
|
||||
assert_eq!(parent_locks, locks);
|
||||
assert!(!ctx.contains_procedure(meta.id));
|
||||
assert!(ctx.state(meta.id).is_none());
|
||||
|
||||
meta.lock_key = Some(LockKey::new("c"));
|
||||
let locks = meta.locks_needed();
|
||||
assert_eq!(
|
||||
vec![LockKey::new("a"), LockKey::new("b"), LockKey::new("c")],
|
||||
locks
|
||||
);
|
||||
assert!(ctx.try_insert_procedure(meta.clone()));
|
||||
assert!(ctx.contains_procedure(meta.id));
|
||||
|
||||
assert_eq!(ProcedureState::Running, ctx.state(meta.id).unwrap());
|
||||
meta.set_state(ProcedureState::Done);
|
||||
assert_eq!(ProcedureState::Done, ctx.state(meta.id).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_manager_context_insert_duplicate() {
|
||||
let ctx = ManagerContext::new();
|
||||
let meta = Arc::new(test_util::procedure_meta_for_test());
|
||||
|
||||
assert!(ctx.try_insert_procedure(meta.clone()));
|
||||
assert!(!ctx.try_insert_procedure(meta));
|
||||
}
|
||||
|
||||
fn new_child(parent_id: ProcedureId, ctx: &ManagerContext) -> ProcedureMetaRef {
|
||||
let mut child = test_util::procedure_meta_for_test();
|
||||
child.parent_id = Some(parent_id);
|
||||
let child = Arc::new(child);
|
||||
assert!(ctx.try_insert_procedure(child.clone()));
|
||||
|
||||
let mut parent = Vec::new();
|
||||
ctx.find_procedures(&[parent_id], &mut parent);
|
||||
parent[0].push_child(child.id);
|
||||
|
||||
child
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_procedures_in_tree() {
|
||||
let ctx = ManagerContext::new();
|
||||
let root = Arc::new(test_util::procedure_meta_for_test());
|
||||
assert!(ctx.try_insert_procedure(root.clone()));
|
||||
|
||||
assert_eq!(1, ctx.procedures_in_tree(&root).len());
|
||||
|
||||
let child1 = new_child(root.id, &ctx);
|
||||
let child2 = new_child(root.id, &ctx);
|
||||
|
||||
let child3 = new_child(child1.id, &ctx);
|
||||
let child4 = new_child(child1.id, &ctx);
|
||||
|
||||
let child5 = new_child(child2.id, &ctx);
|
||||
|
||||
let expect = vec![
|
||||
root.id, child1.id, child2.id, child3.id, child4.id, child5.id,
|
||||
];
|
||||
assert_eq!(expect, ctx.procedures_in_tree(&root));
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct ProcedureToLoad {
|
||||
content: String,
|
||||
lock_key: LockKey,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Procedure for ProcedureToLoad {
|
||||
fn type_name(&self) -> &str {
|
||||
"ProcedureToLoad"
|
||||
}
|
||||
|
||||
async fn execute(&mut self, _ctx: &Context) -> Result<Status> {
|
||||
Ok(Status::Done)
|
||||
}
|
||||
|
||||
fn dump(&self) -> Result<String> {
|
||||
Ok(self.content.clone())
|
||||
}
|
||||
|
||||
fn lock_key(&self) -> LockKey {
|
||||
self.lock_key.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl ProcedureToLoad {
|
||||
fn new(content: &str) -> ProcedureToLoad {
|
||||
ProcedureToLoad {
|
||||
content: content.to_string(),
|
||||
lock_key: LockKey::default(),
|
||||
}
|
||||
}
|
||||
|
||||
fn loader() -> BoxedProcedureLoader {
|
||||
let f = |json: &str| {
|
||||
let procedure = ProcedureToLoad::new(json);
|
||||
Ok(Box::new(procedure) as _)
|
||||
};
|
||||
Box::new(f)
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_register_loader() {
|
||||
let dir = TempDir::new("register").unwrap();
|
||||
let config = ManagerConfig {
|
||||
object_store: test_util::new_object_store(&dir),
|
||||
};
|
||||
let manager = LocalManager::new(config);
|
||||
|
||||
manager
|
||||
.register_loader("ProcedureToLoad", ProcedureToLoad::loader())
|
||||
.unwrap();
|
||||
// Register duplicate loader.
|
||||
let err = manager
|
||||
.register_loader("ProcedureToLoad", ProcedureToLoad::loader())
|
||||
.unwrap_err();
|
||||
assert!(matches!(err, Error::LoaderConflict { .. }), "{err}");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_recover() {
|
||||
let dir = TempDir::new("recover").unwrap();
|
||||
let object_store = test_util::new_object_store(&dir);
|
||||
let config = ManagerConfig {
|
||||
object_store: object_store.clone(),
|
||||
};
|
||||
let manager = LocalManager::new(config);
|
||||
|
||||
manager
|
||||
.register_loader("ProcedureToLoad", ProcedureToLoad::loader())
|
||||
.unwrap();
|
||||
|
||||
// Prepare data
|
||||
let procedure_store = ProcedureStore::from(object_store.clone());
|
||||
let root: BoxedProcedure = Box::new(ProcedureToLoad::new("test recover manager"));
|
||||
let root_id = ProcedureId::random();
|
||||
// Prepare data for the root procedure.
|
||||
for step in 0..3 {
|
||||
procedure_store
|
||||
.store_procedure(root_id, step, &root, None)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let child: BoxedProcedure = Box::new(ProcedureToLoad::new("a child procedure"));
|
||||
let child_id = ProcedureId::random();
|
||||
// Prepare data for the child procedure
|
||||
for step in 0..2 {
|
||||
procedure_store
|
||||
.store_procedure(child_id, step, &child, Some(root_id))
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
// Recover the manager
|
||||
manager.recover().await.unwrap();
|
||||
|
||||
// The manager should submit the root procedure.
|
||||
assert!(manager.procedure_state(root_id).await.unwrap().is_some());
|
||||
// Since the mocked root procedure actually doesn't submit subprocedures, so there is no
|
||||
// related state.
|
||||
assert!(manager.procedure_state(child_id).await.unwrap().is_none());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_submit_procedure() {
|
||||
let dir = TempDir::new("submit").unwrap();
|
||||
let config = ManagerConfig {
|
||||
object_store: test_util::new_object_store(&dir),
|
||||
};
|
||||
let manager = LocalManager::new(config);
|
||||
|
||||
let procedure_id = ProcedureId::random();
|
||||
assert!(manager
|
||||
.procedure_state(procedure_id)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_none());
|
||||
assert!(manager.procedure_watcher(procedure_id).is_none());
|
||||
|
||||
let mut procedure = ProcedureToLoad::new("submit");
|
||||
procedure.lock_key = LockKey::single("test.submit");
|
||||
manager
|
||||
.submit(ProcedureWithId {
|
||||
id: procedure_id,
|
||||
procedure: Box::new(procedure),
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(manager
|
||||
.procedure_state(procedure_id)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_some());
|
||||
// Wait for the procedure done.
|
||||
let mut watcher = manager.procedure_watcher(procedure_id).unwrap();
|
||||
watcher.changed().await.unwrap();
|
||||
assert_eq!(ProcedureState::Done, *watcher.borrow());
|
||||
|
||||
// Try to submit procedure with same id again.
|
||||
let err = manager
|
||||
.submit(ProcedureWithId {
|
||||
id: procedure_id,
|
||||
procedure: Box::new(ProcedureToLoad::new("submit")),
|
||||
})
|
||||
.await
|
||||
.unwrap_err();
|
||||
assert!(matches!(err, Error::DuplicateProcedure { .. }), "{err}");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_state_changed_on_err() {
|
||||
let dir = TempDir::new("on_err").unwrap();
|
||||
let config = ManagerConfig {
|
||||
object_store: test_util::new_object_store(&dir),
|
||||
};
|
||||
let manager = LocalManager::new(config);
|
||||
|
||||
#[derive(Debug)]
|
||||
struct MockProcedure {
|
||||
panic: bool,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Procedure for MockProcedure {
|
||||
fn type_name(&self) -> &str {
|
||||
"MockProcedure"
|
||||
}
|
||||
|
||||
async fn execute(&mut self, _ctx: &Context) -> Result<Status> {
|
||||
if self.panic {
|
||||
// Test the runner can set the state to failed even the procedure
|
||||
// panics.
|
||||
panic!();
|
||||
} else {
|
||||
Err(Error::external(MockError::new(StatusCode::Unexpected)))
|
||||
}
|
||||
}
|
||||
|
||||
fn dump(&self) -> Result<String> {
|
||||
Ok(String::new())
|
||||
}
|
||||
|
||||
fn lock_key(&self) -> LockKey {
|
||||
LockKey::single("test.submit")
|
||||
}
|
||||
}
|
||||
|
||||
let check_procedure = |procedure| {
|
||||
async {
|
||||
let procedure_id = ProcedureId::random();
|
||||
let mut watcher = manager
|
||||
.submit(ProcedureWithId {
|
||||
id: procedure_id,
|
||||
procedure: Box::new(procedure),
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
// Wait for the notification.
|
||||
watcher.changed().await.unwrap();
|
||||
assert_eq!(ProcedureState::Failed, *watcher.borrow());
|
||||
}
|
||||
};
|
||||
|
||||
check_procedure(MockProcedure { panic: false }).await;
|
||||
check_procedure(MockProcedure { panic: true }).await;
|
||||
}
|
||||
}
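An aside for orientation: the tests above exercise the new watcher-based API end to end. As a minimal usage sketch (illustrative only; `manager` is an already-built `LocalManager` and error handling is elided, neither is part of this diff):

    // Submit a procedure, then block until it reaches a terminal state.
    let procedure_id = ProcedureId::random();
    let mut watcher = manager
        .submit(ProcedureWithId {
            id: procedure_id,
            procedure: Box::new(ProcedureToLoad::new("example")),
        })
        .await?;
    loop {
        // `Watcher` is a tokio watch receiver; `changed()` resolves each time
        // the runner publishes a new ProcedureState.
        watcher.changed().await?;
        match *watcher.borrow() {
            ProcedureState::Running => continue,
            ProcedureState::Done | ProcedureState::Failed => break,
        }
    }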
@@ -54,13 +54,13 @@ impl Lock {
}

/// Manages lock entries for procedures.
struct LockMap {
pub(crate) struct LockMap {
    locks: RwLock<HashMap<String, Lock>>,
}

impl LockMap {
    /// Returns a new [LockMap].
    fn new() -> LockMap {
    pub(crate) fn new() -> LockMap {
        LockMap {
            locks: RwLock::new(HashMap::new()),
        }
@@ -73,7 +73,7 @@ impl LockMap {
    ///
    /// # Panics
    /// Panics if the procedure acquires the lock recursively.
    async fn acquire_lock(&self, key: &str, meta: ProcedureMetaRef) {
    pub(crate) async fn acquire_lock(&self, key: &str, meta: ProcedureMetaRef) {
        assert!(!self.hold_lock(key, meta.id));

        {
@@ -101,7 +101,7 @@ impl LockMap {
    }

    /// Release lock by `key`.
    fn release_lock(&self, key: &str, procedure_id: ProcedureId) {
    pub(crate) fn release_lock(&self, key: &str, procedure_id: ProcedureId) {
        let mut locks = self.locks.write().unwrap();
        if let Some(lock) = locks.get_mut(key) {
            if lock.owner.id != procedure_id {
@@ -142,11 +142,11 @@ mod tests {
    use std::sync::Arc;

    use super::*;
    use crate::local;
    use crate::local::test_util;

    #[test]
    fn test_lock_no_waiter() {
        let meta = Arc::new(local::procedure_meta_for_test());
        let meta = Arc::new(test_util::procedure_meta_for_test());
        let mut lock = Lock::from_owner(meta);

        assert!(!lock.switch_owner());
@@ -154,10 +154,10 @@ mod tests {

    #[tokio::test]
    async fn test_lock_with_waiter() {
        let owner = Arc::new(local::procedure_meta_for_test());
        let owner = Arc::new(test_util::procedure_meta_for_test());
        let mut lock = Lock::from_owner(owner);

        let waiter = Arc::new(local::procedure_meta_for_test());
        let waiter = Arc::new(test_util::procedure_meta_for_test());
        lock.waiters.push_back(waiter.clone());

        assert!(lock.switch_owner());
@@ -171,11 +171,11 @@ mod tests {
    async fn test_lock_map() {
        let key = "hello";

        let owner = Arc::new(local::procedure_meta_for_test());
        let owner = Arc::new(test_util::procedure_meta_for_test());
        let lock_map = Arc::new(LockMap::new());
        lock_map.acquire_lock(key, owner.clone()).await;

        let waiter = Arc::new(local::procedure_meta_for_test());
        let waiter = Arc::new(test_util::procedure_meta_for_test());
        let waiter_id = waiter.id;

        // The waiter releases the lock; this should not take effect.
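The lock-ordering discipline that `LockMap` relies on is easiest to see in isolation. A minimal sketch, independent of the code above (names are illustrative): if every task acquires keys in one global order, the classic AB/BA deadlock cannot happen.

    use std::sync::{Arc, Mutex};
    use std::thread;

    fn lock_in_order(locks: &[Arc<Mutex<()>>; 2]) {
        // Every caller takes index 0 before index 1, so no thread can hold
        // one lock while waiting for the other in the reverse order.
        let _a = locks[0].lock().unwrap();
        let _b = locks[1].lock().unwrap();
    }

    fn main() {
        let locks = [Arc::new(Mutex::new(())), Arc::new(Mutex::new(()))];
        let (l1, l2) = (locks.clone(), locks.clone());
        let t1 = thread::spawn(move || lock_in_order(&l1));
        let t2 = thread::spawn(move || lock_in_order(&l2));
        t1.join().unwrap();
        t2.join().unwrap();
    }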
src/common/procedure/src/local/runner.rs (new file, 822 lines)
@@ -0,0 +1,822 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;
use std::time::Duration;

use common_telemetry::logging;
use tokio::time;

use crate::error::{Error, Result};
use crate::local::{ManagerContext, ProcedureMeta, ProcedureMetaRef};
use crate::store::ProcedureStore;
use crate::{BoxedProcedure, Context, ProcedureId, ProcedureState, ProcedureWithId, Status};

const ERR_WAIT_DURATION: Duration = Duration::from_secs(30);

#[derive(Debug)]
enum ExecResult {
    Continue,
    Done,
    RetryLater,
    Failed(Error),
}

#[cfg(test)]
impl ExecResult {
    fn is_continue(&self) -> bool {
        matches!(self, ExecResult::Continue)
    }

    fn is_done(&self) -> bool {
        matches!(self, ExecResult::Done)
    }

    fn is_retry_later(&self) -> bool {
        matches!(self, ExecResult::RetryLater)
    }

    fn is_failed(&self) -> bool {
        matches!(self, ExecResult::Failed(_))
    }
}

/// A guard to clean up the procedure state.
struct ProcedureGuard {
    meta: ProcedureMetaRef,
    manager_ctx: Arc<ManagerContext>,
    finish: bool,
}

impl ProcedureGuard {
    /// Returns a new [ProcedureGuard].
    fn new(meta: ProcedureMetaRef, manager_ctx: Arc<ManagerContext>) -> ProcedureGuard {
        ProcedureGuard {
            meta,
            manager_ctx,
            finish: false,
        }
    }

    /// The procedure has finished successfully.
    fn finish(mut self) {
        self.finish = true;
    }
}

impl Drop for ProcedureGuard {
    fn drop(&mut self) {
        if !self.finish {
            logging::error!("Procedure {} exits unexpectedly", self.meta.id);

            // Set the state to failed. This is useful in tests as the runtime may not
            // abort when the runner task panics.
            // See https://github.com/tokio-rs/tokio/issues/2002 .
            // We call set_panic_hook() in the application's main function, but our tests
            // don't install this panic hook.
            self.meta.set_state(ProcedureState::Failed);
        }

        // Notify the parent procedure.
        if let Some(parent_id) = self.meta.parent_id {
            self.manager_ctx.notify_by_subprocedure(parent_id);
        }

        // Release locks in reverse order.
        for key in self.meta.lock_key.keys_to_unlock() {
            self.manager_ctx.lock_map.release_lock(key, self.meta.id);
        }
    }
}
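`ProcedureGuard` is a standard defuse-on-success RAII guard. A stripped-down sketch of the pattern (illustrative, not the actual implementation):

    struct CleanupGuard {
        finish: bool,
    }

    impl CleanupGuard {
        // Consuming `self` on the happy path marks the guard as defused;
        // Drop still runs, but becomes a no-op.
        fn finish(mut self) {
            self.finish = true;
        }
    }

    impl Drop for CleanupGuard {
        fn drop(&mut self) {
            if !self.finish {
                // Early exit (error or panic): run the cleanup here, e.g. mark
                // the procedure as failed and release its locks.
                eprintln!("unexpected exit, running cleanup");
            }
        }
    }

    fn main() {
        let guard = CleanupGuard { finish: false };
        // ... do the work; on success:
        guard.finish();
    }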
// TODO(yingwen): Support cancellation.
pub(crate) struct Runner {
    pub(crate) meta: ProcedureMetaRef,
    pub(crate) procedure: BoxedProcedure,
    pub(crate) manager_ctx: Arc<ManagerContext>,
    pub(crate) step: u32,
    pub(crate) store: ProcedureStore,
}

impl Runner {
    /// Run the procedure.
    pub(crate) async fn run(mut self) -> Result<()> {
        // Ensure we can update the procedure state.
        let guard = ProcedureGuard::new(self.meta.clone(), self.manager_ctx.clone());

        logging::info!(
            "Runner {}-{} starts",
            self.procedure.type_name(),
            self.meta.id
        );

        // TODO(yingwen): Detect recursive locking (and deadlock) if possible. Maybe we could detect
        // recursive locking by adding a root procedure id to the meta.
        for key in self.meta.lock_key.keys_to_lock() {
            // Acquire the lock for each key.
            self.manager_ctx
                .lock_map
                .acquire_lock(key, self.meta.clone())
                .await;
        }

        let mut result = Ok(());
        // Execute the procedure. We need to release the locks whether the execution
        // succeeds or fails.
        if let Err(e) = self.execute_procedure_in_loop().await {
            result = Err(e);
        }

        // We can't remove the metadata of the procedure now as users and its parent might
        // need to query its state.
        // TODO(yingwen): 1. Add TTL to the metadata; 2. Only keep state in the procedure store
        // so we don't need to always store the metadata in memory after the procedure is done.

        // Release locks and notify the parent procedure.
        guard.finish();

        // If this is the root procedure, clean up the message cache.
        if self.meta.parent_id.is_none() {
            let procedure_ids = self.manager_ctx.procedures_in_tree(&self.meta);
            self.manager_ctx.remove_messages(&procedure_ids);
        }

        logging::info!(
            "Runner {}-{} exits",
            self.procedure.type_name(),
            self.meta.id
        );

        result
    }

    async fn execute_procedure_in_loop(&mut self) -> Result<()> {
        let ctx = Context {
            procedure_id: self.meta.id,
            provider: self.manager_ctx.clone(),
        };

        loop {
            match self.execute_once(&ctx).await {
                ExecResult::Continue => (),
                ExecResult::Done => return Ok(()),
                ExecResult::RetryLater => {
                    self.wait_on_err().await;
                }
                ExecResult::Failed(e) => return Err(e),
            }
        }
    }

    async fn execute_once(&mut self, ctx: &Context) -> ExecResult {
        match self.procedure.execute(ctx).await {
            Ok(status) => {
                logging::debug!(
                    "Execute procedure {}-{} once, status: {:?}, need_persist: {}",
                    self.procedure.type_name(),
                    self.meta.id,
                    status,
                    status.need_persist(),
                );

                if status.need_persist() && self.persist_procedure().await.is_err() {
                    return ExecResult::RetryLater;
                }

                match status {
                    Status::Executing { .. } => (),
                    Status::Suspended { subprocedures, .. } => {
                        self.on_suspended(subprocedures).await;
                    }
                    Status::Done => {
                        if self.commit_procedure().await.is_err() {
                            return ExecResult::RetryLater;
                        }

                        self.done();
                        return ExecResult::Done;
                    }
                }

                ExecResult::Continue
            }
            Err(e) => {
                logging::error!(
                    e;
                    "Failed to execute procedure {}-{}, retry: {}",
                    self.procedure.type_name(),
                    self.meta.id,
                    e.is_retry_later(),
                );

                if e.is_retry_later() {
                    return ExecResult::RetryLater;
                }

                self.meta.set_state(ProcedureState::Failed);

                // Write the rollback key so we can skip this procedure while recovering procedures.
                if self.rollback_procedure().await.is_err() {
                    return ExecResult::RetryLater;
                }

                ExecResult::Failed(e)
            }
        }
    }

    /// Submit a subprocedure with specific `procedure_id`.
    fn submit_subprocedure(&self, procedure_id: ProcedureId, mut procedure: BoxedProcedure) {
        if self.manager_ctx.contains_procedure(procedure_id) {
            // If the parent has already submitted this procedure, don't submit it again.
            return;
        }

        let mut step = 0;
        if let Some(loaded_procedure) = self.manager_ctx.load_one_procedure(procedure_id) {
            // Try to load the procedure state from the message to avoid re-running the
            // subprocedure from its initial state.
            assert_eq!(self.meta.id, loaded_procedure.parent_id.unwrap());

            // Use the dumped procedure from the procedure store.
            procedure = loaded_procedure.procedure;
            // Update the step number.
            step = loaded_procedure.step;
        }

        let meta = Arc::new(ProcedureMeta::new(
            procedure_id,
            Some(self.meta.id),
            procedure.lock_key(),
        ));
        let runner = Runner {
            meta: meta.clone(),
            procedure,
            manager_ctx: self.manager_ctx.clone(),
            step,
            store: self.store.clone(),
        };

        // Insert the procedure. We already checked the procedure's existence before
        // inserting, so we add an assertion to ensure the procedure id is unique and
        // no other procedure is using the same procedure id.
        assert!(
            self.manager_ctx.try_insert_procedure(meta),
            "Procedure {}-{} submit an existing procedure {}-{}",
            self.procedure.type_name(),
            self.meta.id,
            runner.procedure.type_name(),
            procedure_id,
        );

        // Add the id of the subprocedure to the metadata.
        self.meta.push_child(procedure_id);

        common_runtime::spawn_bg(async move {
            // Run the subprocedure.
            runner.run().await
        });
    }

    async fn wait_on_err(&self) {
        time::sleep(ERR_WAIT_DURATION).await;
    }

    async fn on_suspended(&self, subprocedures: Vec<ProcedureWithId>) {
        let has_child = !subprocedures.is_empty();
        for subprocedure in subprocedures {
            logging::info!(
                "Procedure {}-{} submit subprocedure {}-{}",
                self.procedure.type_name(),
                self.meta.id,
                subprocedure.procedure.type_name(),
                subprocedure.id,
            );

            self.submit_subprocedure(subprocedure.id, subprocedure.procedure);
        }

        logging::info!(
            "Procedure {}-{} is waiting for subprocedures",
            self.procedure.type_name(),
            self.meta.id,
        );

        // Wait for subprocedures.
        if has_child {
            self.meta.child_notify.notified().await;

            logging::info!(
                "Procedure {}-{} is waked up",
                self.procedure.type_name(),
                self.meta.id,
            );
        }
    }

    async fn persist_procedure(&mut self) -> Result<()> {
        self.store
            .store_procedure(
                self.meta.id,
                self.step,
                &self.procedure,
                self.meta.parent_id,
            )
            .await
            .map_err(|e| {
                logging::error!(
                    e; "Failed to persist procedure {}-{}",
                    self.procedure.type_name(),
                    self.meta.id
                );
                e
            })?;
        self.step += 1;
        Ok(())
    }

    async fn commit_procedure(&mut self) -> Result<()> {
        self.store
            .commit_procedure(self.meta.id, self.step)
            .await
            .map_err(|e| {
                logging::error!(
                    e; "Failed to commit procedure {}-{}",
                    self.procedure.type_name(),
                    self.meta.id
                );
                e
            })?;
        self.step += 1;
        Ok(())
    }

    async fn rollback_procedure(&mut self) -> Result<()> {
        self.store
            .rollback_procedure(self.meta.id, self.step)
            .await
            .map_err(|e| {
                logging::error!(
                    e; "Failed to write rollback key for procedure {}-{}",
                    self.procedure.type_name(),
                    self.meta.id
                );
                e
            })?;
        self.step += 1;
        Ok(())
    }

    fn done(&self) {
        // TODO(yingwen): Add files to the remove list.
        logging::info!(
            "Procedure {}-{} done",
            self.procedure.type_name(),
            self.meta.id,
        );

        // Mark the state of this procedure as done.
        self.meta.set_state(ProcedureState::Done);
    }
}
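The control flow of `run`/`execute_once` boils down to a drive loop over a small result enum. A reduced sketch of that shape (illustrative; the real loop also persists state and distinguishes a terminal failure):

    use std::time::Duration;

    enum Step {
        Continue,
        Done,
        RetryLater,
    }

    // Drives a step function until it reports Done, sleeping before retries.
    async fn drive(mut next: impl FnMut() -> Step) {
        loop {
            match next() {
                Step::Continue => (),
                Step::Done => return,
                Step::RetryLater => tokio::time::sleep(Duration::from_secs(30)).await,
            }
        }
    }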
#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use async_trait::async_trait;
    use common_error::ext::PlainError;
    use common_error::mock::MockError;
    use common_error::prelude::StatusCode;
    use futures_util::future::BoxFuture;
    use futures_util::{FutureExt, TryStreamExt};
    use object_store::ObjectStore;
    use tempdir::TempDir;

    use super::*;
    use crate::local::test_util;
    use crate::{ContextProvider, LockKey, Procedure};

    const ROOT_ID: &str = "9f805a1f-05f7-490c-9f91-bd56e3cc54c1";

    fn new_runner(
        meta: ProcedureMetaRef,
        procedure: BoxedProcedure,
        store: ProcedureStore,
    ) -> Runner {
        Runner {
            meta,
            procedure,
            manager_ctx: Arc::new(ManagerContext::new()),
            step: 0,
            store,
        }
    }

    async fn check_files(object_store: &ObjectStore, procedure_id: ProcedureId, files: &[&str]) {
        let dir = format!("{procedure_id}/");
        let object = object_store.object(&dir);
        let lister = object.list().await.unwrap();
        let mut files_in_dir: Vec<_> = lister
            .map_ok(|de| de.name().to_string())
            .try_collect()
            .await
            .unwrap();
        files_in_dir.sort_unstable();
        assert_eq!(files, files_in_dir);
    }

    fn context_without_provider(procedure_id: ProcedureId) -> Context {
        struct MockProvider;

        #[async_trait]
        impl ContextProvider for MockProvider {
            async fn procedure_state(
                &self,
                _procedure_id: ProcedureId,
            ) -> Result<Option<ProcedureState>> {
                unimplemented!()
            }
        }

        Context {
            procedure_id,
            provider: Arc::new(MockProvider),
        }
    }

    #[derive(Debug)]
    struct ProcedureAdapter<F> {
        data: String,
        lock_key: LockKey,
        exec_fn: F,
    }

    impl<F> ProcedureAdapter<F> {
        fn new_meta(&self, uuid: &str) -> ProcedureMetaRef {
            let mut meta = test_util::procedure_meta_for_test();
            meta.id = ProcedureId::parse_str(uuid).unwrap();
            meta.lock_key = self.lock_key.clone();

            Arc::new(meta)
        }
    }

    #[async_trait]
    impl<F> Procedure for ProcedureAdapter<F>
    where
        F: FnMut(Context) -> BoxFuture<'static, Result<Status>> + Send + Sync,
    {
        fn type_name(&self) -> &str {
            "ProcedureAdapter"
        }

        async fn execute(&mut self, ctx: &Context) -> Result<Status> {
            let f = (self.exec_fn)(ctx.clone());
            f.await
        }

        fn dump(&self) -> Result<String> {
            Ok(self.data.clone())
        }

        fn lock_key(&self) -> LockKey {
            self.lock_key.clone()
        }
    }

    async fn execute_once_normal(persist: bool, first_files: &[&str], second_files: &[&str]) {
        let mut times = 0;
        let exec_fn = move |_| {
            times += 1;
            async move {
                if times == 1 {
                    Ok(Status::Executing { persist })
                } else {
                    Ok(Status::Done)
                }
            }
            .boxed()
        };
        let normal = ProcedureAdapter {
            data: "normal".to_string(),
            lock_key: LockKey::single("catalog.schema.table"),
            exec_fn,
        };

        let dir = TempDir::new("normal").unwrap();
        let meta = normal.new_meta(ROOT_ID);
        let ctx = context_without_provider(meta.id);
        let object_store = test_util::new_object_store(&dir);
        let procedure_store = ProcedureStore::from(object_store.clone());
        let mut runner = new_runner(meta, Box::new(normal), procedure_store);

        let res = runner.execute_once(&ctx).await;
        assert!(res.is_continue(), "{res:?}");
        check_files(&object_store, ctx.procedure_id, first_files).await;

        let res = runner.execute_once(&ctx).await;
        assert!(res.is_done(), "{res:?}");
        check_files(&object_store, ctx.procedure_id, second_files).await;
    }

    #[tokio::test]
    async fn test_execute_once_normal() {
        execute_once_normal(
            true,
            &["0000000000.step"],
            &["0000000000.step", "0000000001.commit"],
        )
        .await;
    }

    #[tokio::test]
    async fn test_execute_once_normal_skip_persist() {
        execute_once_normal(false, &[], &["0000000000.commit"]).await;
    }

    #[tokio::test]
    async fn test_on_suspend_empty() {
        let exec_fn = move |_| {
            async move {
                Ok(Status::Suspended {
                    subprocedures: Vec::new(),
                    persist: false,
                })
            }
            .boxed()
        };
        let suspend = ProcedureAdapter {
            data: "suspend".to_string(),
            lock_key: LockKey::single("catalog.schema.table"),
            exec_fn,
        };

        let dir = TempDir::new("suspend").unwrap();
        let meta = suspend.new_meta(ROOT_ID);
        let ctx = context_without_provider(meta.id);
        let object_store = test_util::new_object_store(&dir);
        let procedure_store = ProcedureStore::from(object_store.clone());
        let mut runner = new_runner(meta, Box::new(suspend), procedure_store);

        let res = runner.execute_once(&ctx).await;
        assert!(res.is_continue(), "{res:?}");
    }

    fn new_child_procedure(procedure_id: ProcedureId, keys: &[&str]) -> ProcedureWithId {
        let mut times = 0;
        let exec_fn = move |_| {
            times += 1;
            async move {
                if times == 1 {
                    time::sleep(Duration::from_millis(200)).await;
                    Ok(Status::Executing { persist: true })
                } else {
                    Ok(Status::Done)
                }
            }
            .boxed()
        };
        let child = ProcedureAdapter {
            data: "child".to_string(),
            lock_key: LockKey::new(keys.iter().map(|k| k.to_string())),
            exec_fn,
        };

        ProcedureWithId {
            id: procedure_id,
            procedure: Box::new(child),
        }
    }

    #[tokio::test]
    async fn test_on_suspend_by_subprocedures() {
        let mut times = 0;
        let children_ids = [ProcedureId::random(), ProcedureId::random()];
        let keys = [
            &[
                "catalog.schema.table.region-0",
                "catalog.schema.table.region-1",
            ],
            &[
                "catalog.schema.table.region-2",
                "catalog.schema.table.region-3",
            ],
        ];

        let exec_fn = move |ctx: Context| {
            times += 1;
            async move {
                if times == 1 {
                    // Submit subprocedures.
                    Ok(Status::Suspended {
                        subprocedures: children_ids
                            .into_iter()
                            .zip(keys)
                            .map(|(id, key_slice)| new_child_procedure(id, key_slice))
                            .collect(),
                        persist: true,
                    })
                } else {
                    // Wait for subprocedures.
                    let mut all_child_done = true;
                    for id in children_ids {
                        if ctx.provider.procedure_state(id).await.unwrap()
                            != Some(ProcedureState::Done)
                        {
                            all_child_done = false;
                        }
                    }
                    if all_child_done {
                        Ok(Status::Done)
                    } else {
                        // Return suspended to wait for the notification.
                        Ok(Status::Suspended {
                            subprocedures: Vec::new(),
                            persist: false,
                        })
                    }
                }
            }
            .boxed()
        };
        let parent = ProcedureAdapter {
            data: "parent".to_string(),
            lock_key: LockKey::single("catalog.schema.table"),
            exec_fn,
        };

        let dir = TempDir::new("parent").unwrap();
        let meta = parent.new_meta(ROOT_ID);
        let procedure_id = meta.id;

        let object_store = test_util::new_object_store(&dir);
        let procedure_store = ProcedureStore::from(object_store.clone());
        let mut runner = new_runner(meta.clone(), Box::new(parent), procedure_store);
        let manager_ctx = Arc::new(ManagerContext::new());
        // Manually add this procedure to the manager ctx.
        assert!(manager_ctx.try_insert_procedure(meta));
        // Replace the manager ctx.
        runner.manager_ctx = manager_ctx;

        runner.run().await.unwrap();

        // Check files in the store.
        for child_id in children_ids {
            check_files(
                &object_store,
                child_id,
                &["0000000000.step", "0000000001.commit"],
            )
            .await;
        }
        check_files(
            &object_store,
            procedure_id,
            &["0000000000.step", "0000000001.commit"],
        )
        .await;
    }

    #[tokio::test]
    async fn test_execute_on_error() {
        let exec_fn =
            |_| async { Err(Error::external(MockError::new(StatusCode::Unexpected))) }.boxed();
        let fail = ProcedureAdapter {
            data: "fail".to_string(),
            lock_key: LockKey::single("catalog.schema.table"),
            exec_fn,
        };

        let dir = TempDir::new("fail").unwrap();
        let meta = fail.new_meta(ROOT_ID);
        let ctx = context_without_provider(meta.id);
        let object_store = test_util::new_object_store(&dir);
        let procedure_store = ProcedureStore::from(object_store.clone());
        let mut runner = new_runner(meta.clone(), Box::new(fail), procedure_store);

        let res = runner.execute_once(&ctx).await;
        assert!(res.is_failed(), "{res:?}");
        assert_eq!(ProcedureState::Failed, meta.state());
        check_files(&object_store, ctx.procedure_id, &["0000000000.rollback"]).await;
    }

    #[tokio::test]
    async fn test_execute_on_retry_later_error() {
        let mut times = 0;

        let exec_fn = move |_| {
            times += 1;
            async move {
                if times == 1 {
                    Err(Error::retry_later(MockError::new(StatusCode::Unexpected)))
                } else {
                    Ok(Status::Done)
                }
            }
            .boxed()
        };

        let retry_later = ProcedureAdapter {
            data: "retry_later".to_string(),
            lock_key: LockKey::single("catalog.schema.table"),
            exec_fn,
        };

        let dir = TempDir::new("retry_later").unwrap();
        let meta = retry_later.new_meta(ROOT_ID);
        let ctx = context_without_provider(meta.id);
        let object_store = test_util::new_object_store(&dir);
        let procedure_store = ProcedureStore::from(object_store.clone());
        let mut runner = new_runner(meta.clone(), Box::new(retry_later), procedure_store);

        let res = runner.execute_once(&ctx).await;
        assert!(res.is_retry_later(), "{res:?}");
        assert_eq!(ProcedureState::Running, meta.state());

        let res = runner.execute_once(&ctx).await;
        assert!(res.is_done(), "{res:?}");
        assert_eq!(ProcedureState::Done, meta.state());
        check_files(&object_store, ctx.procedure_id, &["0000000000.commit"]).await;
    }

    #[tokio::test]
    async fn test_child_error() {
        let mut times = 0;
        let child_id = ProcedureId::random();

        let exec_fn = move |ctx: Context| {
            times += 1;
            async move {
                if times == 1 {
                    // Submit subprocedures.
                    let exec_fn = |_| {
                        async { Err(Error::external(MockError::new(StatusCode::Unexpected))) }
                            .boxed()
                    };
                    let fail = ProcedureAdapter {
                        data: "fail".to_string(),
                        lock_key: LockKey::single("catalog.schema.table.region-0"),
                        exec_fn,
                    };

                    Ok(Status::Suspended {
                        subprocedures: vec![ProcedureWithId {
                            id: child_id,
                            procedure: Box::new(fail),
                        }],
                        persist: true,
                    })
                } else {
                    // Wait for subprocedures.
                    let state = ctx.provider.procedure_state(child_id).await.unwrap();
                    if state == Some(ProcedureState::Failed) {
                        // The parent procedure aborts itself if the child procedure fails.
                        Err(Error::from_error_ext(PlainError::new(
                            "subprocedure failed".to_string(),
                            StatusCode::Unexpected,
                        )))
                    } else {
                        // Return suspended to wait for the notification.
                        Ok(Status::Suspended {
                            subprocedures: Vec::new(),
                            persist: false,
                        })
                    }
                }
            }
            .boxed()
        };
        let parent = ProcedureAdapter {
            data: "parent".to_string(),
            lock_key: LockKey::single("catalog.schema.table"),
            exec_fn,
        };

        let dir = TempDir::new("child_err").unwrap();
        let meta = parent.new_meta(ROOT_ID);

        let object_store = test_util::new_object_store(&dir);
        let procedure_store = ProcedureStore::from(object_store.clone());
        let mut runner = new_runner(meta.clone(), Box::new(parent), procedure_store);

        let manager_ctx = Arc::new(ManagerContext::new());
        // Manually add this procedure to the manager ctx.
        assert!(manager_ctx.try_insert_procedure(meta));
        // Replace the manager ctx.
        runner.manager_ctx = manager_ctx;

        // Run the runner and execute the procedure.
        let err = runner.run().await.unwrap_err();
        assert!(err.to_string().contains("subprocedure failed"), "{err}");
    }
}
@@ -18,12 +18,15 @@ use std::sync::Arc;

use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use smallvec::{smallvec, SmallVec};
use snafu::{ResultExt, Snafu};
use tokio::sync::watch::Receiver;
use uuid::Uuid;

use crate::error::Result;

/// Procedure execution status.
#[derive(Debug)]
pub enum Status {
    /// The procedure is still executing.
    Executing {
@@ -57,11 +60,23 @@ impl Status {
    }
}

/// [ContextProvider] provides information about procedures in the [ProcedureManager].
#[async_trait]
pub trait ContextProvider: Send + Sync {
    /// Query the procedure state.
    async fn procedure_state(&self, procedure_id: ProcedureId) -> Result<Option<ProcedureState>>;
}

/// Reference-counted pointer to [ContextProvider].
pub type ContextProviderRef = Arc<dyn ContextProvider>;

/// Procedure execution context.
#[derive(Debug)]
#[derive(Clone)]
pub struct Context {
    /// Id of the procedure.
    pub procedure_id: ProcedureId,
    /// [ProcedureManager] context provider.
    pub provider: ContextProviderRef,
}

/// A `Procedure` represents an operation or a set of operations to be performed step-by-step.
@@ -78,25 +93,41 @@ pub trait Procedure: Send + Sync {
    /// Dump the state of the procedure to a string.
    fn dump(&self) -> Result<String>;

    /// Returns the [LockKey] if this procedure needs to acquire a lock.
    fn lock_key(&self) -> Option<LockKey>;
    /// Returns the [LockKey] that this procedure needs to acquire.
    fn lock_key(&self) -> LockKey;
}

/// A key to identify the lock.
// We might hold multiple keys in this struct. When there are multiple keys, we need to
// sort the keys and lock all of them in order to avoid deadlock.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct LockKey(String);
/// Keys to identify required locks.
///
/// [LockKey] always sorts keys lexicographically so that they can be acquired
/// in the same order.
// Most procedures should only acquire 1 ~ 2 locks so we use smallvec to hold keys.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct LockKey(SmallVec<[String; 2]>);

impl LockKey {
    /// Returns a new [LockKey].
    pub fn new(key: impl Into<String>) -> LockKey {
        LockKey(key.into())
    /// Returns a new [LockKey] with only one key.
    pub fn single(key: impl Into<String>) -> LockKey {
        LockKey(smallvec![key.into()])
    }

    /// Returns the lock key.
    pub fn key(&self) -> &str {
        &self.0
    /// Returns a new [LockKey] with keys from the specific `iter`.
    pub fn new(iter: impl IntoIterator<Item = String>) -> LockKey {
        let mut vec: SmallVec<_> = iter.into_iter().collect();
        vec.sort();
        // Dedup keys to avoid acquiring the same key multiple times.
        vec.dedup();
        LockKey(vec)
    }

    /// Returns the keys to lock.
    pub fn keys_to_lock(&self) -> impl Iterator<Item = &String> {
        self.0.iter()
    }

    /// Returns the keys to unlock.
    pub fn keys_to_unlock(&self) -> impl Iterator<Item = &String> {
        self.0.iter().rev()
    }
}

@@ -121,6 +152,12 @@ impl ProcedureWithId {
    }
}

impl fmt::Debug for ProcedureWithId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}-{}", self.procedure.type_name(), self.id)
    }
}

#[derive(Debug, Snafu)]
pub struct ParseIdError {
    source: uuid::Error,
@@ -163,7 +200,7 @@ pub type BoxedProcedureLoader = Box<dyn Fn(&str) -> Result<BoxedProcedure> + Sen

// TODO(yingwen): Find a way to return the error message if the procedure fails.
/// State of a submitted procedure.
#[derive(Debug)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ProcedureState {
    /// The procedure is running.
    Running,
@@ -173,6 +210,9 @@ pub enum ProcedureState {
    Failed,
}

/// Watcher to watch the procedure state.
pub type Watcher = Receiver<ProcedureState>;

// TODO(yingwen): Shutdown
/// `ProcedureManager` executes [Procedure]s submitted to it.
#[async_trait]
@@ -181,7 +221,9 @@ pub trait ProcedureManager: Send + Sync + 'static {
    fn register_loader(&self, name: &str, loader: BoxedProcedureLoader) -> Result<()>;

    /// Submits a procedure to execute.
    async fn submit(&self, procedure: ProcedureWithId) -> Result<()>;
    ///
    /// Returns a [Watcher] to watch the created procedure.
    async fn submit(&self, procedure: ProcedureWithId) -> Result<Watcher>;

    /// Recovers unfinished procedures and reruns them.
    ///
@@ -192,6 +234,9 @@ pub trait ProcedureManager: Send + Sync + 'static {
    ///
    /// Returns `Ok(None)` if the procedure doesn't exist.
    async fn procedure_state(&self, procedure_id: ProcedureId) -> Result<Option<ProcedureState>>;

    /// Returns a [Watcher] to watch the [ProcedureState] of a specific procedure.
    fn procedure_watcher(&self, procedure_id: ProcedureId) -> Option<Watcher>;
}

/// Ref-counted pointer to the [ProcedureManager].
@@ -228,8 +273,21 @@ mod tests {
    #[test]
    fn test_lock_key() {
        let entity = "catalog.schema.my_table";
        let key = LockKey::new(entity);
        assert_eq!(entity, key.key());
        let key = LockKey::single(entity);
        assert_eq!(vec![entity], key.keys_to_lock().collect::<Vec<_>>());
        assert_eq!(vec![entity], key.keys_to_unlock().collect::<Vec<_>>());

        let key = LockKey::new([
            "b".to_string(),
            "c".to_string(),
            "a".to_string(),
            "c".to_string(),
        ]);
        assert_eq!(vec!["a", "b", "c"], key.keys_to_lock().collect::<Vec<_>>());
        assert_eq!(
            vec!["c", "b", "a"],
            key.keys_to_unlock().collect::<Vec<_>>()
        );
    }

    #[test]

@@ -14,37 +14,46 @@

use std::collections::HashMap;
use std::fmt;
use std::sync::Arc;

use common_telemetry::logging;
use futures::TryStreamExt;
use object_store::ObjectStore;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;

use crate::error::{Result, ToJsonSnafu};
use crate::store::state_store::StateStoreRef;
pub(crate) use crate::store::state_store::{ObjectStateStore, StateStoreRef};
use crate::{BoxedProcedure, ProcedureId};

mod state_store;

/// Serialized data of a procedure.
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
struct ProcedureMessage {
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct ProcedureMessage {
    /// Type name of the procedure. The procedure framework also uses the type name to
    /// find a loader to load the procedure.
    type_name: String,
    pub type_name: String,
    /// The data of the procedure.
    data: String,
    pub data: String,
    /// Parent procedure id.
    parent_id: Option<ProcedureId>,
    pub parent_id: Option<ProcedureId>,
    /// Current step.
    pub step: u32,
}
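For reference, the persisted message is plain JSON with the field order above. A self-contained round-trip sketch (the `Message` struct here is a local stand-in mirroring `ProcedureMessage`, not the real type):

    use serde::{Deserialize, Serialize};

    #[derive(Debug, PartialEq, Serialize, Deserialize)]
    struct Message {
        type_name: String,
        data: String,
        parent_id: Option<String>,
        step: u32,
    }

    fn main() {
        let msg = Message {
            type_name: "MockProcedure".to_string(),
            data: "payload".to_string(),
            parent_id: None,
            step: 0,
        };
        let json = serde_json::to_string(&msg).unwrap();
        assert_eq!(
            json,
            r#"{"type_name":"MockProcedure","data":"payload","parent_id":null,"step":0}"#
        );
        let back: Message = serde_json::from_str(&json).unwrap();
        assert_eq!(msg, back);
    }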
/// Procedure storage layer.
#[derive(Clone)]
struct ProcedureStore(StateStoreRef);
pub(crate) struct ProcedureStore(StateStoreRef);

impl ProcedureStore {
    /// Creates a new [ProcedureStore] from specific [StateStoreRef].
    pub(crate) fn new(state_store: StateStoreRef) -> ProcedureStore {
        ProcedureStore(state_store)
    }

    /// Dump the `procedure` to the storage.
    async fn store_procedure(
    pub(crate) async fn store_procedure(
        &self,
        procedure_id: ProcedureId,
        step: u32,
@@ -58,11 +67,12 @@ impl ProcedureStore {
            type_name: type_name.to_string(),
            data,
            parent_id,
            step,
        };
        let key = ParsedKey {
            procedure_id,
            step,
            is_committed: false,
            key_type: KeyType::Step,
        }
        .to_string();
        let value = serde_json::to_string(&message).context(ToJsonSnafu)?;
@@ -73,11 +83,32 @@ impl ProcedureStore {
    }

    /// Write commit flag to the storage.
    async fn commit_procedure(&self, procedure_id: ProcedureId, step: u32) -> Result<()> {
    pub(crate) async fn commit_procedure(
        &self,
        procedure_id: ProcedureId,
        step: u32,
    ) -> Result<()> {
        let key = ParsedKey {
            procedure_id,
            step,
            is_committed: true,
            key_type: KeyType::Commit,
        }
        .to_string();
        self.0.put(&key, Vec::new()).await?;

        Ok(())
    }

    /// Write rollback flag to the storage.
    pub(crate) async fn rollback_procedure(
        &self,
        procedure_id: ProcedureId,
        step: u32,
    ) -> Result<()> {
        let key = ParsedKey {
            procedure_id,
            step,
            key_type: KeyType::Rollback,
        }
        .to_string();
        self.0.put(&key, Vec::new()).await?;
@@ -86,7 +117,7 @@ impl ProcedureStore {
    }

    /// Load uncommitted procedures from the storage.
    async fn load_messages(&self) -> Result<HashMap<ProcedureId, ProcedureMessage>> {
    pub(crate) async fn load_messages(&self) -> Result<HashMap<ProcedureId, ProcedureMessage>> {
        let mut messages = HashMap::new();
        // Track the key-value pair by procedure id.
        let mut procedure_key_values: HashMap<_, (ParsedKey, Vec<u8>)> = HashMap::new();
@@ -110,7 +141,7 @@ impl ProcedureStore {
        }

        for (procedure_id, (parsed_key, value)) in procedure_key_values {
            if !parsed_key.is_committed {
            if parsed_key.key_type == KeyType::Step {
                let Some(message) = self.load_one_message(&parsed_key, &value) else {
                    // We don't abort the loading process and just ignore errors to ensure all remaining
                    // procedures are loaded.
@@ -134,12 +165,47 @@ impl ProcedureStore {
    }
}

impl From<ObjectStore> for ProcedureStore {
    fn from(store: ObjectStore) -> ProcedureStore {
        let state_store = ObjectStateStore::new(store);

        ProcedureStore::new(Arc::new(state_store))
    }
}

/// Suffix type of the key.
#[derive(Debug, PartialEq, Eq)]
enum KeyType {
    Step,
    Commit,
    Rollback,
}

impl KeyType {
    fn as_str(&self) -> &'static str {
        match self {
            KeyType::Step => "step",
            KeyType::Commit => "commit",
            KeyType::Rollback => "rollback",
        }
    }

    fn from_str(s: &str) -> Option<KeyType> {
        match s {
            "step" => Some(KeyType::Step),
            "commit" => Some(KeyType::Commit),
            "rollback" => Some(KeyType::Rollback),
            _ => None,
        }
    }
}
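The resulting key layout is one file per step under a per-procedure directory, with the `KeyType` suffix encoding the kind. A small sketch of the formatting rule (the shortened id is for readability only):

    fn format_key(procedure_id: &str, step: u32, suffix: &str) -> String {
        // Zero-padding the step keeps lexicographic order equal to numeric order.
        format!("{procedure_id}/{step:010}.{suffix}")
    }

    fn main() {
        assert_eq!(
            "9f805a1f/0000000002.commit",
            format_key("9f805a1f", 2, "commit")
        );
    }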
|
||||
|
||||
/// Key to refer the procedure in the [ProcedureStore].
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
struct ParsedKey {
|
||||
procedure_id: ProcedureId,
|
||||
step: u32,
|
||||
is_committed: bool,
|
||||
key_type: KeyType,
|
||||
}
|
||||
|
||||
impl fmt::Display for ParsedKey {
|
||||
@@ -149,7 +215,7 @@ impl fmt::Display for ParsedKey {
|
||||
"{}/{:010}.{}",
|
||||
self.procedure_id,
|
||||
self.step,
|
||||
if self.is_committed { "commit" } else { "step" }
|
||||
self.key_type.as_str(),
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -166,41 +232,42 @@ impl ParsedKey {
|
||||
let mut parts = name.split('.');
|
||||
let step_str = parts.next()?;
|
||||
let suffix = parts.next()?;
|
||||
let is_committed = match suffix {
|
||||
"commit" => true,
|
||||
"step" => false,
|
||||
_ => return None,
|
||||
};
|
||||
let key_type = KeyType::from_str(suffix)?;
|
||||
let step = step_str.parse().ok()?;
|
||||
|
||||
Some(ParsedKey {
|
||||
procedure_id,
|
||||
step,
|
||||
is_committed,
|
||||
key_type,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use object_store::services::fs::Builder;
|
||||
use object_store::ObjectStore;
|
||||
use object_store::services::Fs as Builder;
|
||||
use object_store::ObjectStoreBuilder;
|
||||
use tempdir::TempDir;
|
||||
|
||||
use super::*;
|
||||
use crate::store::state_store::ObjectStateStore;
|
||||
use crate::{Context, LockKey, Procedure, Status};
|
||||
|
||||
fn procedure_store_for_test(dir: &TempDir) -> ProcedureStore {
|
||||
let store_dir = dir.path().to_str().unwrap();
|
||||
let accessor = Builder::default().root(store_dir).build().unwrap();
|
||||
let object_store = ObjectStore::new(accessor).finish();
|
||||
|
||||
ProcedureStore::from(object_store)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parsed_key() {
|
||||
let procedure_id = ProcedureId::random();
|
||||
let key = ParsedKey {
|
||||
procedure_id,
|
||||
step: 2,
|
||||
is_committed: false,
|
||||
key_type: KeyType::Step,
|
||||
};
|
||||
assert_eq!(format!("{procedure_id}/0000000002.step"), key.to_string());
|
||||
assert_eq!(key, ParsedKey::parse_str(&key.to_string()).unwrap());
|
||||
@@ -208,10 +275,21 @@ mod tests {
|
||||
let key = ParsedKey {
|
||||
procedure_id,
|
||||
step: 2,
|
||||
is_committed: true,
|
||||
key_type: KeyType::Commit,
|
||||
};
|
||||
assert_eq!(format!("{procedure_id}/0000000002.commit"), key.to_string());
|
||||
assert_eq!(key, ParsedKey::parse_str(&key.to_string()).unwrap());
|
||||
|
||||
let key = ParsedKey {
|
||||
procedure_id,
|
||||
step: 2,
|
||||
key_type: KeyType::Rollback,
|
||||
};
|
||||
assert_eq!(
|
||||
format!("{procedure_id}/0000000002.rollback"),
|
||||
key.to_string()
|
||||
);
|
||||
assert_eq!(key, ParsedKey::parse_str(&key.to_string()).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -246,12 +324,13 @@ mod tests {
|
||||
type_name: "TestMessage".to_string(),
|
||||
data: "no parent id".to_string(),
|
||||
parent_id: None,
|
||||
step: 4,
|
||||
};
|
||||
|
||||
let json = serde_json::to_string(&message).unwrap();
|
||||
assert_eq!(
|
||||
json,
|
||||
r#"{"type_name":"TestMessage","data":"no parent id","parent_id":null}"#
|
||||
r#"{"type_name":"TestMessage","data":"no parent id","parent_id":null,"step":4}"#
|
||||
);
|
||||
|
||||
let procedure_id = ProcedureId::parse_str("9f805a1f-05f7-490c-9f91-bd56e3cc54c1").unwrap();
|
||||
@@ -259,7 +338,7 @@ mod tests {
|
||||
let json = serde_json::to_string(&message).unwrap();
|
||||
assert_eq!(
|
||||
json,
|
||||
r#"{"type_name":"TestMessage","data":"no parent id","parent_id":"9f805a1f-05f7-490c-9f91-bd56e3cc54c1"}"#
|
||||
r#"{"type_name":"TestMessage","data":"no parent id","parent_id":"9f805a1f-05f7-490c-9f91-bd56e3cc54c1","step":4}"#
|
||||
);
|
||||
}
|
||||
|
||||
@@ -287,24 +366,15 @@ mod tests {
|
||||
Ok(self.data.clone())
|
||||
}
|
||||
|
||||
fn lock_key(&self) -> Option<LockKey> {
|
||||
None
|
||||
fn lock_key(&self) -> LockKey {
|
||||
LockKey::default()
|
||||
}
|
||||
}
fn new_procedure_store(dir: &TempDir) -> ProcedureStore {
    let store_dir = dir.path().to_str().unwrap();
    let accessor = Builder::default().root(store_dir).build().unwrap();
    let object_store = ObjectStore::new(accessor);
    let state_store = ObjectStateStore::new(object_store);

    ProcedureStore(Arc::new(state_store))
}

#[tokio::test]
async fn test_store_procedure() {
    let dir = TempDir::new("store_procedure").unwrap();
    let store = new_procedure_store(&dir);
    let store = procedure_store_for_test(&dir);

    let procedure_id = ProcedureId::random();
    let procedure: BoxedProcedure = Box::new(MockProcedure::new("test store procedure"));

@@ -321,14 +391,15 @@ mod tests {
        type_name: "MockProcedure".to_string(),
        data: "test store procedure".to_string(),
        parent_id: None,
        step: 0,
    };
    assert_eq!(expect, *msg);
}

#[tokio::test]
async fn test_commit_procedure() {
    let dir = TempDir::new("store_procedure").unwrap();
    let store = new_procedure_store(&dir);
    let dir = TempDir::new("commit_procedure").unwrap();
    let store = procedure_store_for_test(&dir);

    let procedure_id = ProcedureId::random();
    let procedure: BoxedProcedure = Box::new(MockProcedure::new("test store procedure"));

@@ -343,10 +414,28 @@ mod tests {
    assert!(messages.is_empty());
}

#[tokio::test]
async fn test_rollback_procedure() {
    let dir = TempDir::new("rollback_procedure").unwrap();
    let store = procedure_store_for_test(&dir);

    let procedure_id = ProcedureId::random();
    let procedure: BoxedProcedure = Box::new(MockProcedure::new("test store procedure"));

    store
        .store_procedure(procedure_id, 0, &procedure, None)
        .await
        .unwrap();
    store.rollback_procedure(procedure_id, 1).await.unwrap();

    let messages = store.load_messages().await.unwrap();
    assert!(messages.is_empty());
}

#[tokio::test]
async fn test_load_messages() {
    let dir = TempDir::new("store_procedure").unwrap();
    let store = new_procedure_store(&dir);
    let dir = TempDir::new("load_messages").unwrap();
    let store = procedure_store_for_test(&dir);

    // store 3 steps
    let id0 = ProcedureId::random();

@@ -20,9 +20,7 @@ use futures::{Stream, TryStreamExt};
use object_store::{ObjectMode, ObjectStore};
use snafu::ResultExt;

use crate::error::{
    DeleteStateSnafu, Error, ListStateSnafu, PutStateSnafu, ReadStateSnafu, Result,
};
use crate::error::{DeleteStateSnafu, Error, PutStateSnafu, Result};

/// Key value from state store.
type KeyValue = (String, Vec<u8>);

@@ -72,22 +70,23 @@ impl StateStore for ObjectStateStore {

    async fn walk_top_down(&self, path: &str) -> Result<KeyValueStream> {
        let path_string = path.to_string();
        let op = self.store.batch();
        // Note that there is no guarantee about the order between files and dirs
        // at the same level.
        // See https://docs.rs/opendal/0.25.2/opendal/raw/struct.TopDownWalker.html#note
        let stream = op
            .walk_top_down(path)
            .context(ListStateSnafu { path })?
            .map_err(move |e| Error::ListState {

        let lister = self
            .store
            .object(path)
            .scan()
            .await
            .map_err(|e| Error::ListState {
                path: path_string.clone(),
                source: e,
            })
            })?;

        let stream = lister
            .try_filter_map(|entry| async move {
                let key = entry.path();
                let key_value = match entry.mode().await.context(ReadStateSnafu { key })? {
                let key_value = match entry.mode().await? {
                    ObjectMode::FILE => {
                        let value = entry.read().await.context(ReadStateSnafu { key })?;
                        let value = entry.read().await?;

                        Some((key.to_string(), value))
                    }
@@ -95,6 +94,10 @@ impl StateStore for ObjectStateStore {
                };

                Ok(key_value)
            })
            .map_err(move |e| Error::ListState {
                path: path_string.clone(),
                source: e,
            });

        Ok(Box::pin(stream))
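
The rewritten walk_top_down above is the opendal migration in miniature: the removed TopDownWalker is replaced by object(path).scan(), and the per-entry snafu contexts give way to plain `?` with a single map_err at the end of the stream. A hedged, self-contained sketch of the same listing pattern, using only the calls visible in this hunk (the `object_store::Error` re-export and the function name are illustrative assumptions):

    use futures::TryStreamExt;
    use object_store::{ObjectMode, ObjectStore};

    // Collects (path, content) pairs for every file under `path`.
    async fn collect_files(
        store: &ObjectStore,
        path: &str,
    ) -> Result<Vec<(String, Vec<u8>)>, object_store::Error> {
        let lister = store.object(path).scan().await?;
        lister
            .try_filter_map(|entry| async move {
                Ok(match entry.mode().await? {
                    // Only regular files carry payloads; directories are skipped.
                    ObjectMode::FILE => Some((entry.path().to_string(), entry.read().await?)),
                    _ => None,
                })
            })
            .try_collect()
            .await
    }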
@@ -112,7 +115,8 @@ impl StateStore for ObjectStateStore {

#[cfg(test)]
mod tests {
    use object_store::services::fs::Builder;
    use object_store::services::Fs as Builder;
    use object_store::ObjectStoreBuilder;
    use tempdir::TempDir;

    use super::*;
@@ -122,7 +126,7 @@ mod tests {
        let dir = TempDir::new("state_store").unwrap();
        let store_dir = dir.path().to_str().unwrap();
        let accessor = Builder::default().root(store_dir).build().unwrap();
        let object_store = ObjectStore::new(accessor);
        let object_store = ObjectStore::new(accessor).finish();
        let state_store = ObjectStateStore::new(object_store);

        let data: Vec<_> = state_store

@@ -14,7 +14,7 @@ datafusion-common.workspace = true
datafusion-expr.workspace = true
datatypes = { path = "../../datatypes" }
snafu.workspace = true
statrs = "0.15"
statrs = "0.16"

[dev-dependencies]
common-base = { path = "../base" }

@@ -14,13 +14,13 @@ datafusion.workspace = true
datafusion-expr.workspace = true
datatypes = { path = "../../datatypes" }
futures = "0.3"
prost = "0.9"
prost.workspace = true
snafu.workspace = true
table = { path = "../../table" }

[dependencies.substrait_proto]
package = "substrait"
version = "0.2"
version = "0.4"

[dev-dependencies]
datatypes = { path = "../../datatypes" }

@@ -15,10 +15,10 @@
use std::collections::HashMap;

use datafusion::common::DFSchemaRef;
use substrait_proto::protobuf::extensions::simple_extension_declaration::{
use substrait_proto::proto::extensions::simple_extension_declaration::{
    ExtensionFunction, MappingType,
};
use substrait_proto::protobuf::extensions::SimpleExtensionDeclaration;
use substrait_proto::proto::extensions::SimpleExtensionDeclaration;

#[derive(Default)]
pub struct ConvertorContext {

@@ -20,15 +20,15 @@ use datafusion_expr::expr::Sort;
use datafusion_expr::{expr_fn, lit, Between, BinaryExpr, BuiltinScalarFunction, Expr, Operator};
use datatypes::schema::Schema;
use snafu::{ensure, OptionExt};
use substrait_proto::protobuf::expression::field_reference::ReferenceType as FieldReferenceType;
use substrait_proto::protobuf::expression::reference_segment::{
use substrait_proto::proto::expression::field_reference::ReferenceType as FieldReferenceType;
use substrait_proto::proto::expression::reference_segment::{
    ReferenceType as SegReferenceType, StructField,
};
use substrait_proto::protobuf::expression::{
use substrait_proto::proto::expression::{
    FieldReference, Literal, ReferenceSegment, RexType, ScalarFunction,
};
use substrait_proto::protobuf::function_argument::ArgType;
use substrait_proto::protobuf::Expression;
use substrait_proto::proto::function_argument::ArgType;
use substrait_proto::proto::Expression;

use crate::context::ConvertorContext;
use crate::error::{
@@ -61,6 +61,7 @@ pub(crate) fn to_df_expr(
    | RexType::MultiOrList(_)
    | RexType::Cast(_)
    | RexType::Subquery(_)
    | RexType::Nested(_)
    | RexType::Enum(_) => UnsupportedExprSnafu {
        name: format!("substrait expression {expr_rex_type:?}"),
    }
@@ -615,9 +616,9 @@ pub fn convert_column(column: &Column, schema: &Schema) -> Result<FieldReference
/// Some utilities specific to this `DataFusion::Expr` and `Substrait::Expression` conversion.
mod utils {
    use datafusion_expr::{BuiltinScalarFunction, Operator};
    use substrait_proto::protobuf::expression::{RexType, ScalarFunction};
    use substrait_proto::protobuf::function_argument::ArgType;
    use substrait_proto::protobuf::{Expression, FunctionArgument};
    use substrait_proto::proto::expression::{RexType, ScalarFunction};
    use substrait_proto::proto::function_argument::ArgType;
    use substrait_proto::proto::{Expression, FunctionArgument};

    pub(crate) fn name_df_operator(op: &Operator) -> &str {
        match op {

@@ -25,13 +25,13 @@ use datafusion::physical_plan::project_schema;
use datafusion_expr::{Filter, LogicalPlan, TableScan, TableSource};
use prost::Message;
use snafu::{ensure, OptionExt, ResultExt};
use substrait_proto::protobuf::expression::mask_expression::{StructItem, StructSelect};
use substrait_proto::protobuf::expression::MaskExpression;
use substrait_proto::protobuf::extensions::simple_extension_declaration::MappingType;
use substrait_proto::protobuf::plan_rel::RelType as PlanRelType;
use substrait_proto::protobuf::read_rel::{NamedTable, ReadType};
use substrait_proto::protobuf::rel::RelType;
use substrait_proto::protobuf::{FilterRel, Plan, PlanRel, ReadRel, Rel};
use substrait_proto::proto::expression::mask_expression::{StructItem, StructSelect};
use substrait_proto::proto::expression::MaskExpression;
use substrait_proto::proto::extensions::simple_extension_declaration::MappingType;
use substrait_proto::proto::plan_rel::RelType as PlanRelType;
use substrait_proto::proto::read_rel::{NamedTable, ReadType};
use substrait_proto::proto::rel::RelType;
use substrait_proto::proto::{FilterRel, Plan, PlanRel, ReadRel, Rel};
use table::table::adapter::DfTableProviderAdapter;

use crate::context::ConvertorContext;
@@ -424,6 +424,7 @@ impl DFLogicalSubstraitConvertor {
            relations: vec![plan_rel],
            advanced_extensions: None,
            expected_type_urls: vec![],
            ..Default::default()
        })
    }

@@ -485,6 +486,7 @@ impl DFLogicalSubstraitConvertor {
            projection,
            advanced_extension: None,
            read_type: Some(read_type),
            ..Default::default()
        };

        Ok(read_rel)
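
The two `..Default::default()` hunks above future-proof the generated substrait 0.4 structs: instead of naming every field (and breaking whenever the proto grows one), the literal fills the rest with defaults. A self-contained sketch of the pattern with a hypothetical stand-in type, not the real generated one:

    #[derive(Default)]
    struct PlanStandIn {
        relations: Vec<u32>,
        expected_type_urls: Vec<String>,
        // Imagine a later substrait release adding this field: the literal
        // below still compiles because struct-update syntax supplies the default.
        version: Option<u32>,
    }

    fn build_plan() -> PlanStandIn {
        PlanStandIn {
            relations: vec![1],
            ..Default::default()
        }
    }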
@@ -522,7 +524,7 @@ mod test {
    use catalog::{CatalogList, CatalogProvider, RegisterTableRequest};
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
    use datafusion::common::{DFSchema, ToDFSchema};
    use datatypes::schema::Schema;
    use datatypes::schema::RawSchema;
    use table::requests::CreateTableRequest;
    use table::test_util::{EmptyTable, MockTableEngine};

@@ -558,7 +560,7 @@ mod test {
            schema_name: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_name.to_string(),
            desc: None,
            schema: Arc::new(Schema::new(supported_types())),
            schema: RawSchema::new(supported_types()),
            region_numbers: vec![0],
            primary_key_indices: vec![],
            create_if_not_exists: true,

@@ -13,8 +13,8 @@
// limitations under the License.

use datatypes::schema::{ColumnSchema, Schema};
use substrait_proto::protobuf::r#type::{Nullability, Struct as SubstraitStruct};
use substrait_proto::protobuf::NamedStruct;
use substrait_proto::proto::r#type::{Nullability, Struct as SubstraitStruct};
use substrait_proto::proto::NamedStruct;

use crate::error::Result;
use crate::types::{from_concrete_type, to_concrete_type};

@@ -20,9 +20,9 @@

use datafusion::scalar::ScalarValue;
use datatypes::prelude::ConcreteDataType;
use substrait_proto::protobuf::expression::literal::LiteralType;
use substrait_proto::protobuf::r#type::{self as s_type, Kind, Nullability};
use substrait_proto::protobuf::{Type as SType, Type};
use substrait_proto::proto::expression::literal::LiteralType;
use substrait_proto::proto::r#type::{self as s_type, Kind, Nullability};
use substrait_proto::proto::{Type as SType, Type};

use crate::error::{self, Result, UnsupportedConcreteTypeSnafu, UnsupportedSubstraitTypeSnafu};

@@ -86,6 +86,7 @@ pub fn to_concrete_type(ty: &SType) -> Result<(ConcreteDataType, bool)> {
    | Kind::Struct(_)
    | Kind::List(_)
    | Kind::Map(_)
    | Kind::UserDefined(_)
    | Kind::UserDefinedTypeReference(_) => UnsupportedSubstraitTypeSnafu {
        ty: format!("{kind:?}"),
    }
@@ -131,7 +132,9 @@ pub fn from_concrete_type(ty: ConcreteDataType, nullability: Option<bool>) -> Re
        ConcreteDataType::Timestamp(_) => {
            build_substrait_kind!(Timestamp, Timestamp, nullability, 0)
        }
        ConcreteDataType::List(_) => UnsupportedConcreteTypeSnafu { ty }.fail()?,
        ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
            UnsupportedConcreteTypeSnafu { ty }.fail()?
        }
    };

    Ok(SType { kind })

@@ -26,6 +26,7 @@ use tracing_bunyan_formatter::{BunyanFormattingLayer, JsonStorageLayer};
use tracing_log::LogTracer;
use tracing_subscriber::fmt::Layer;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::prelude::*;
use tracing_subscriber::{filter, EnvFilter, Registry};

pub use crate::{debug, error, info, log, trace, warn};
@@ -77,6 +78,15 @@ pub fn init_global_logging(
    let file_logging_layer = BunyanFormattingLayer::new(app_name.to_string(), rolling_writer);
    guards.push(rolling_writer_guard);

    // error JSON log layer.
    let err_rolling_appender =
        RollingFileAppender::new(Rotation::HOURLY, dir, format!("{}-{}", app_name, "err"));
    let (err_rolling_writer, err_rolling_writer_guard) =
        tracing_appender::non_blocking(err_rolling_appender);
    let err_file_logging_layer =
        BunyanFormattingLayer::new(app_name.to_string(), err_rolling_writer);
    guards.push(err_rolling_writer_guard);

    // Use env RUST_LOG to initialize log if present.
    // Otherwise use the specified level.
    let directives = env::var(EnvFilter::DEFAULT_ENV).unwrap_or_else(|_x| level.to_string());
@@ -99,7 +109,8 @@ pub fn init_global_logging(
        .with(filter)
        .with(JsonStorageLayer)
        .with(stdout_logging_layer)
        .with(file_logging_layer);
        .with(file_logging_layer)
        .with(err_file_logging_layer.with_filter(filter::LevelFilter::ERROR));

    // Must enable 'tokio_unstable' cfg, https://github.com/tokio-rs/console
    #[cfg(feature = "console")]
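
The logging hunks add a second Bunyan file layer that only ever sees ERROR events, via the `with_filter` extension brought in by the new tracing_subscriber prelude import. A minimal sketch of that composition in isolation (the writer and names are illustrative; the real wiring, including keeping the worker guard alive in `guards`, is in init_global_logging above):

    use tracing_bunyan_formatter::{BunyanFormattingLayer, JsonStorageLayer};
    use tracing_subscriber::filter::LevelFilter;
    use tracing_subscriber::prelude::*;
    use tracing_subscriber::Registry;

    fn error_only_subscriber() -> (impl tracing::Subscriber, tracing_appender::non_blocking::WorkerGuard) {
        // Stand-in writer; the real code uses an hourly RollingFileAppender.
        let (writer, guard) = tracing_appender::non_blocking(std::io::stderr());
        let err_layer = BunyanFormattingLayer::new("app-err".to_string(), writer);
        // Only ERROR events reach this layer; everything else is filtered out.
        let subscriber = Registry::default()
            .with(JsonStorageLayer)
            .with(err_layer.with_filter(LevelFilter::ERROR));
        (subscriber, guard)
    }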
@@ -5,7 +5,7 @@ edition.workspace = true
license.workspace = true

[dependencies]
chrono = "0.4"
chrono.workspace = true
common-error = { path = "../error" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"

@@ -71,6 +71,10 @@ impl Date {
    pub fn val(&self) -> i32 {
        self.0
    }

    pub fn to_chrono_date(&self) -> Option<NaiveDate> {
        NaiveDate::from_num_days_from_ce_opt(UNIX_EPOCH_FROM_CE + self.0)
    }
}

#[cfg(test)]

@@ -23,7 +23,7 @@ use crate::error::{Error, ParseDateStrSnafu, Result};

const DATETIME_FORMAT: &str = "%F %T";

/// [DateTime] represents the **seconds elapsed since "1970-01-01 00:00:00 UTC" (UNIX Epoch)**.
/// [DateTime] represents the **milliseconds elapsed since "1970-01-01 00:00:00 UTC" (UNIX Epoch)**.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize,
)]
@@ -69,6 +69,10 @@ impl DateTime {
    pub fn val(&self) -> i64 {
        self.0
    }

    pub fn to_chrono_datetime(&self) -> Option<NaiveDateTime> {
        NaiveDateTime::from_timestamp_millis(self.0)
    }
}

#[cfg(test)]

@@ -13,6 +13,7 @@
// limitations under the License.

use std::any::Any;
use std::num::TryFromIntError;

use chrono::ParseError;
use common_error::ext::ErrorExt;
@@ -24,8 +25,18 @@ use snafu::{Backtrace, ErrorCompat, Snafu};
pub enum Error {
    #[snafu(display("Failed to parse string to date, raw: {}, source: {}", raw, source))]
    ParseDateStr { raw: String, source: ParseError },

    #[snafu(display("Failed to parse a string into Timestamp, raw string: {}", raw))]
    ParseTimestamp { raw: String, backtrace: Backtrace },

    #[snafu(display("Current timestamp overflow, source: {}", source))]
    TimestampOverflow {
        source: TryFromIntError,
        backtrace: Backtrace,
    },

    #[snafu(display("Timestamp arithmetic overflow, msg: {}", msg))]
    ArithmeticOverflow { msg: String, backtrace: Backtrace },
}

impl ErrorExt for Error {
@@ -34,6 +45,8 @@ impl ErrorExt for Error {
            Error::ParseDateStr { .. } | Error::ParseTimestamp { .. } => {
                StatusCode::InvalidArguments
            }
            Error::TimestampOverflow { .. } => StatusCode::Internal,
            Error::ArithmeticOverflow { .. } => StatusCode::InvalidArguments,
        }
    }


@@ -1,3 +1,4 @@
#![feature(int_roundings)]
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");

@@ -205,7 +205,7 @@ impl TimestampRange {
    pub fn new_inclusive(start: Option<Timestamp>, end: Option<Timestamp>) -> Self {
        // check for emptiness
        if let (Some(start_ts), Some(end_ts)) = (start, end) {
            if start_ts >= end_ts {
            if start_ts > end_ts {
                return Self::empty();
            }
        }
@@ -462,4 +462,29 @@ mod tests {

        assert!(!full.intersects(&empty));
    }

    #[test]
    fn test_new_inclusive() {
        let range = TimestampRange::new_inclusive(
            Some(Timestamp::new_millisecond(1)),
            Some(Timestamp::new_millisecond(3)),
        );
        assert!(!range.is_empty());
        assert!(range.contains(&Timestamp::new_millisecond(1)));
        assert!(range.contains(&Timestamp::new_millisecond(3)));

        let range = TimestampRange::new_inclusive(
            Some(Timestamp::new_millisecond(1)),
            Some(Timestamp::new_millisecond(1)),
        );
        assert!(!range.is_empty());
        assert_eq!(1, range.start.unwrap().value());
        assert!(range.contains(&Timestamp::new_millisecond(1)));

        let range = TimestampRange::new_inclusive(
            Some(Timestamp::new_millisecond(2)),
            Some(Timestamp::new_millisecond(1)),
        );
        assert!(range.is_empty());
    }
}
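
Tightening `>=` to `>` in new_inclusive above fixes a boundary case: a range whose inclusive start equals its inclusive end is a legitimate single-point range, not an empty one. A sketch of the behavior the new test pins down:

    // After the fix, [5, 5] contains exactly one timestamp instead of nothing.
    let p = Timestamp::new_millisecond(5);
    let range = TimestampRange::new_inclusive(Some(p), Some(p));
    assert!(!range.is_empty());
    assert!(range.contains(&p));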
@@ -17,12 +17,15 @@ use std::cmp::Ordering;
use std::fmt::{Display, Formatter};
use std::hash::{Hash, Hasher};
use std::str::FromStr;
use std::time::Duration;

use chrono::offset::Local;
use chrono::{DateTime, LocalResult, NaiveDateTime, TimeZone, Utc};
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};

use crate::error::{Error, ParseTimestampSnafu};
use crate::error;
use crate::error::{ArithmeticOverflowSnafu, Error, ParseTimestampSnafu, TimestampOverflowSnafu};

#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
pub struct Timestamp {
@@ -31,6 +34,50 @@ pub struct Timestamp {
}

impl Timestamp {
    /// Creates the current timestamp in milliseconds.
    pub fn current_millis() -> Self {
        Self {
            value: crate::util::current_time_millis(),
            unit: TimeUnit::Millisecond,
        }
    }

    /// Subtracts a duration from the timestamp.
    /// # Note
    /// The result's time unit remains unchanged even if `duration` has a different unit from `self`.
    /// For example, subtracting 1 millisecond from a timestamp with value 1 and time unit second
    /// still yields 1 second.
    pub fn sub(&self, duration: Duration) -> error::Result<Self> {
        let duration: i64 = match self.unit {
            TimeUnit::Second => {
                i64::try_from(duration.as_secs()).context(TimestampOverflowSnafu)?
            }
            TimeUnit::Millisecond => {
                i64::try_from(duration.as_millis()).context(TimestampOverflowSnafu)?
            }
            TimeUnit::Microsecond => {
                i64::try_from(duration.as_micros()).context(TimestampOverflowSnafu)?
            }
            TimeUnit::Nanosecond => {
                i64::try_from(duration.as_nanos()).context(TimestampOverflowSnafu)?
            }
        };

        let value = self
            .value
            .checked_sub(duration)
            .with_context(|| ArithmeticOverflowSnafu {
                msg: format!(
                    "Try to subtract timestamp: {:?} with duration: {:?}",
                    self, duration
                ),
            })?;
        Ok(Timestamp {
            value,
            unit: self.unit,
        })
    }

    pub fn new(value: i64, unit: TimeUnit) -> Self {
        Self { unit, value }
    }
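
The new `sub` first converts the `Duration` into the timestamp's own unit (failing via TimestampOverflowSnafu if that conversion cannot fit in an i64), then performs a checked subtraction. Precision finer than the timestamp's unit is therefore truncated, as this sketch shows (mirroring the crate-internal tests further down, where the private `value` field is accessible):

    let one_sec = Timestamp::new(1, TimeUnit::Second);
    // A full second lands on zero.
    assert_eq!(0, one_sec.sub(Duration::from_secs(1)).unwrap().value);
    // One millisecond truncates to zero seconds: the result is still 1 second.
    assert_eq!(1, one_sec.sub(Duration::from_millis(1)).unwrap().value);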
@@ -77,43 +124,56 @@ impl Timestamp {
    pub fn convert_to(&self, unit: TimeUnit) -> Option<Timestamp> {
        if self.unit().factor() >= unit.factor() {
            let mul = self.unit().factor() / unit.factor();
            let value = self.value.checked_mul(mul)?;
            let value = self.value.checked_mul(mul as i64)?;
            Some(Timestamp::new(value, unit))
        } else {
            let mul = unit.factor() / self.unit().factor();
            Some(Timestamp::new(self.value.div_euclid(mul), unit))
            Some(Timestamp::new(self.value.div_euclid(mul as i64), unit))
        }
    }

    /// Convert a timestamp to the given time unit.
    /// Conversion from a timestamp with a smaller unit to a larger unit rounds the value
    /// up (toward positive infinity).
    /// Returns `None` if the conversion causes an overflow.
    pub fn convert_to_ceil(&self, unit: TimeUnit) -> Option<Timestamp> {
        if self.unit().factor() >= unit.factor() {
            let mul = self.unit().factor() / unit.factor();
            let value = self.value.checked_mul(mul as i64)?;
            Some(Timestamp::new(value, unit))
        } else {
            let mul = unit.factor() / self.unit().factor();
            Some(Timestamp::new(self.value.div_ceil(mul as i64), unit))
        }
    }

    /// Split a [Timestamp] into a seconds part and a nanoseconds part.
    /// Note that the seconds part of the split result is always rounded down (floor).
    fn split(&self) -> (i64, i64) {
        let sec_mul = TimeUnit::Second.factor() / self.unit.factor();
        let nsec_mul = self.unit.factor() / TimeUnit::Nanosecond.factor();
    fn split(&self) -> (i64, u32) {
        let sec_mul = (TimeUnit::Second.factor() / self.unit.factor()) as i64;
        let nsec_mul = (self.unit.factor() / TimeUnit::Nanosecond.factor()) as i64;

        let sec_div = self.value.div_euclid(sec_mul);
        let sec_mod = self.value.rem_euclid(sec_mul);
        (sec_div, sec_mod * nsec_mul)
        // safety: the max possible value of `sec_mod` is 999,999,999
        let nsec = u32::try_from(sec_mod * nsec_mul).unwrap();
        (sec_div, nsec)
    }

    /// Format timestamp to an ISO8601 string. If the timestamp exceeds what a chrono timestamp can
    /// represent, this function simply prints the timestamp unit and value in a plain string.
    pub fn to_iso8601_string(&self) -> String {
        let nano_factor = TimeUnit::Second.factor() / TimeUnit::Nanosecond.factor();

        let (mut secs, mut nsecs) = self.split();

        if nsecs < 0 {
            secs -= 1;
            nsecs += nano_factor;
        }

        if let LocalResult::Single(datetime) = Utc.timestamp_opt(secs, nsecs as u32) {
        if let LocalResult::Single(datetime) = self.to_chrono_datetime() {
            format!("{}", datetime.format("%Y-%m-%d %H:%M:%S%.f%z"))
        } else {
            format!("[Timestamp{}: {}]", self.unit, self.value)
        }
    }

    pub fn to_chrono_datetime(&self) -> LocalResult<DateTime<Utc>> {
        let (sec, nsec) = self.split();
        Utc.timestamp_opt(sec, nsec)
    }
}

impl FromStr for Timestamp {
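
`convert_to` floors with div_euclid while the new `convert_to_ceil` rounds toward positive infinity with div_ceil, which is why the `#![feature(int_roundings)]` gate appeared in lib.rs earlier in this diff: `i64::div_ceil` was nightly-only when this change landed. The difference only shows up when converting to a coarser unit; a sketch using values from the tests below:

    let ts = Timestamp::new(1001, TimeUnit::Millisecond);
    assert_eq!(Timestamp::new(1, TimeUnit::Second), ts.convert_to(TimeUnit::Second).unwrap());
    assert_eq!(Timestamp::new(2, TimeUnit::Second), ts.convert_to_ceil(TimeUnit::Second).unwrap());

    // For negative values, floor moves toward -inf and ceil toward +inf.
    let neg = Timestamp::new(-1, TimeUnit::Millisecond);
    assert_eq!(Timestamp::new(-1, TimeUnit::Second), neg.convert_to(TimeUnit::Second).unwrap());
    assert_eq!(Timestamp::new(0, TimeUnit::Second), neg.convert_to_ceil(TimeUnit::Second).unwrap());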
@@ -234,7 +294,7 @@ impl Display for TimeUnit {
}

impl TimeUnit {
    pub fn factor(&self) -> i64 {
    pub fn factor(&self) -> u32 {
        match self {
            TimeUnit::Second => 1_000_000_000,
            TimeUnit::Millisecond => 1_000_000,
@@ -282,7 +342,7 @@ impl Hash for Timestamp {
    fn hash<H: Hasher>(&self, state: &mut H) {
        let (sec, nsec) = self.split();
        state.write_i64(sec);
        state.write_i64(nsec);
        state.write_u32(nsec);
    }
}

@@ -718,4 +778,94 @@ mod tests {
            Timestamp::new(i64::MAX, TimeUnit::Second).split()
        );
    }

    #[test]
    fn test_convert_to_ceil() {
        assert_eq!(
            Timestamp::new(1, TimeUnit::Second),
            Timestamp::new(1000, TimeUnit::Millisecond)
                .convert_to_ceil(TimeUnit::Second)
                .unwrap()
        );

        // These two cases show how `Timestamp::convert_to_ceil` behaves differently
        // from `Timestamp::convert_to` when converting a smaller unit to a larger unit.
        assert_eq!(
            Timestamp::new(1, TimeUnit::Second),
            Timestamp::new(1001, TimeUnit::Millisecond)
                .convert_to(TimeUnit::Second)
                .unwrap()
        );
        assert_eq!(
            Timestamp::new(2, TimeUnit::Second),
            Timestamp::new(1001, TimeUnit::Millisecond)
                .convert_to_ceil(TimeUnit::Second)
                .unwrap()
        );

        assert_eq!(
            Timestamp::new(-1, TimeUnit::Second),
            Timestamp::new(-1, TimeUnit::Millisecond)
                .convert_to(TimeUnit::Second)
                .unwrap()
        );
        assert_eq!(
            Timestamp::new(0, TimeUnit::Second),
            Timestamp::new(-1, TimeUnit::Millisecond)
                .convert_to_ceil(TimeUnit::Second)
                .unwrap()
        );

        // When converting large unit to smaller unit, there will be no rounding error,
        // so `Timestamp::convert_to_ceil` behaves just like `Timestamp::convert_to`
        assert_eq!(
            Timestamp::new(-1, TimeUnit::Second).convert_to(TimeUnit::Millisecond),
            Timestamp::new(-1, TimeUnit::Second).convert_to_ceil(TimeUnit::Millisecond)
        );
        assert_eq!(
            Timestamp::new(1000, TimeUnit::Second).convert_to(TimeUnit::Millisecond),
            Timestamp::new(1000, TimeUnit::Second).convert_to_ceil(TimeUnit::Millisecond)
        );
        assert_eq!(
            Timestamp::new(1, TimeUnit::Second).convert_to(TimeUnit::Millisecond),
            Timestamp::new(1, TimeUnit::Second).convert_to_ceil(TimeUnit::Millisecond)
        );
    }

    #[test]
    fn test_split_overflow() {
        Timestamp::new(i64::MAX, TimeUnit::Second).split();
        Timestamp::new(i64::MIN, TimeUnit::Second).split();
        Timestamp::new(i64::MAX, TimeUnit::Millisecond).split();
        Timestamp::new(i64::MIN, TimeUnit::Millisecond).split();
        Timestamp::new(i64::MAX, TimeUnit::Microsecond).split();
        Timestamp::new(i64::MIN, TimeUnit::Microsecond).split();
        Timestamp::new(i64::MAX, TimeUnit::Nanosecond).split();
        Timestamp::new(i64::MIN, TimeUnit::Nanosecond).split();
        let (sec, nsec) = Timestamp::new(i64::MIN, TimeUnit::Nanosecond).split();
        let time = NaiveDateTime::from_timestamp_opt(sec, nsec).unwrap();
        assert_eq!(sec, time.timestamp());
        assert_eq!(nsec, time.timestamp_subsec_nanos());
    }

    #[test]
    fn test_timestamp_sub() {
        let res = Timestamp::new(1, TimeUnit::Second)
            .sub(Duration::from_secs(1))
            .unwrap();
        assert_eq!(0, res.value);
        assert_eq!(TimeUnit::Second, res.unit);

        let res = Timestamp::new(0, TimeUnit::Second)
            .sub(Duration::from_secs(1))
            .unwrap();
        assert_eq!(-1, res.value);
        assert_eq!(TimeUnit::Second, res.unit);

        let res = Timestamp::new(1, TimeUnit::Second)
            .sub(Duration::from_millis(1))
            .unwrap();
        assert_eq!(1, res.value);
        assert_eq!(TimeUnit::Second, res.unit);
    }
}

@@ -14,6 +14,8 @@

use std::cmp::Ordering;

use crate::Timestamp;

/// Unix timestamp in millisecond resolution.
///
/// Negative timestamp is allowed, which represents timestamp before '1970-01-01T00:00:00'.
@@ -77,22 +79,29 @@ impl PartialOrd<TimestampMillis> for i64 {
    }
}

pub trait BucketAligned {
    /// Returns the timestamp aligned by `bucket_duration` in milliseconds or
    /// `None` if overflow occurred.
pub trait BucketAligned: Sized {
    /// Returns the timestamp aligned by `bucket_duration` or `None` if underflow occurred.
    ///
    /// # Panics
    /// Panics if `bucket_duration <= 0`.
    fn align_by_bucket(self, bucket_duration: i64) -> Option<TimestampMillis>;
    fn align_by_bucket(self, bucket_duration: i64) -> Option<Self>;
}

impl<T: Into<i64>> BucketAligned for T {
    fn align_by_bucket(self, bucket_duration: i64) -> Option<TimestampMillis> {
impl BucketAligned for i64 {
    fn align_by_bucket(self, bucket_duration: i64) -> Option<Self> {
        assert!(bucket_duration > 0, "{}", bucket_duration);
        self.into()
            .checked_div_euclid(bucket_duration)
        self.checked_div_euclid(bucket_duration)
            .and_then(|val| val.checked_mul(bucket_duration))
            .map(TimestampMillis)
    }
}

impl BucketAligned for Timestamp {
    fn align_by_bucket(self, bucket_duration: i64) -> Option<Self> {
        assert!(bucket_duration > 0, "{}", bucket_duration);
        let unit = self.unit();
        self.value()
            .align_by_bucket(bucket_duration)
            .map(|val| Timestamp::new(val, unit))
    }
}
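
BucketAligned is now a trait over `Self` with concrete impls for i64 and Timestamp (replacing the blanket `T: Into<i64>` impl that could only return TimestampMillis), so aligning a Timestamp preserves its time unit. A sketch mirroring the updated tests:

    // Alignment floors to the bucket start and keeps the millisecond unit.
    assert_eq!(
        Some(Timestamp::new_millisecond(100)),
        Timestamp::new_millisecond(199).align_by_bucket(100)
    );
    // Flooring i64::MIN toward -inf is not representable, so alignment returns None.
    assert_eq!(None, Timestamp::new_millisecond(i64::MIN).align_by_bucket(100));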
@@ -121,24 +130,54 @@ mod tests {
    #[test]
    fn test_align_by_bucket() {
        let bucket = 100;
        assert_eq!(0, TimestampMillis::new(0).align_by_bucket(bucket).unwrap());
        assert_eq!(0, TimestampMillis::new(1).align_by_bucket(bucket).unwrap());
        assert_eq!(0, TimestampMillis::new(99).align_by_bucket(bucket).unwrap());
        assert_eq!(
            100,
            TimestampMillis::new(100).align_by_bucket(bucket).unwrap()
            Timestamp::new_millisecond(0),
            Timestamp::new_millisecond(0)
                .align_by_bucket(bucket)
                .unwrap()
        );
        assert_eq!(
            100,
            TimestampMillis::new(199).align_by_bucket(bucket).unwrap()
            Timestamp::new_millisecond(0),
            Timestamp::new_millisecond(1)
                .align_by_bucket(bucket)
                .unwrap()
        );
        assert_eq!(
            Timestamp::new_millisecond(0),
            Timestamp::new_millisecond(99)
                .align_by_bucket(bucket)
                .unwrap()
        );
        assert_eq!(
            Timestamp::new_millisecond(100),
            Timestamp::new_millisecond(100)
                .align_by_bucket(bucket)
                .unwrap()
        );
        assert_eq!(
            Timestamp::new_millisecond(100),
            Timestamp::new_millisecond(199)
                .align_by_bucket(bucket)
                .unwrap()
        );

        assert_eq!(0, TimestampMillis::MAX.align_by_bucket(i64::MAX).unwrap());
        assert_eq!(
            i64::MAX,
            TimestampMillis::INF.align_by_bucket(i64::MAX).unwrap()
            Timestamp::new_millisecond(0),
            Timestamp::new_millisecond(i64::MAX - 1)
                .align_by_bucket(i64::MAX)
                .unwrap()
        );

        assert_eq!(None, TimestampMillis::MIN.align_by_bucket(bucket));
        assert_eq!(
            Timestamp::new_millisecond(i64::MAX),
            Timestamp::new_millisecond(i64::MAX)
                .align_by_bucket(i64::MAX)
                .unwrap()
        );

        assert_eq!(
            None,
            Timestamp::new_millisecond(i64::MIN).align_by_bucket(bucket)
        );
    }
}
Some files were not shown because too many files have changed in this diff.