Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-22 22:20:02 +00:00)

Compare commits: v0.7.0-nig... to v0.7.1 (89 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 8ca9e01455 | |
| | 3a326775ee | |
| | 5ad3b7984e | |
| | 4fc27bdc75 | |
| | e3c82568e5 | |
| | 61f0703af8 | |
| | b85d7bb575 | |
| | d334d74986 | |
| | 5ca8521e87 | |
| | e4333969b4 | |
| | b55905cf66 | |
| | fb4da05f25 | |
| | 904484b525 | |
| | cafb4708ce | |
| | 7c895e2605 | |
| | 9afe327bca | |
| | 58bd065c6b | |
| | 9aa8f756ab | |
| | 7639c227ca | |
| | 1255c1fc9e | |
| | 06dcd0f6ed | |
| | 0a4444a43a | |
| | b7ac8d6aa8 | |
| | e767f37241 | |
| | da098f5568 | |
| | aa953dcc34 | |
| | aa125a50f9 | |
| | d8939eb891 | |
| | 0bb949787c | |
| | 8c37c3fc0f | |
| | 21ff3620be | |
| | aeca0d8e8a | |
| | a309cd018a | |
| | 3ee53360ee | |
| | 352bd7b6fd | |
| | 3f3ef2e7af | |
| | a218f12bd9 | |
| | c884c56151 | |
| | 9ec288cab9 | |
| | 1f1491e429 | |
| | c52bc613e0 | |
| | a9d42f7b87 | |
| | 86ce2d8713 | |
| | 5d644c0b7f | |
| | 020635063c | |
| | 97cbfcfe23 | |
| | 7183fa198c | |
| | 02b18fbca1 | |
| | 7b1c3503d0 | |
| | 6fd2ff49d5 | |
| | 53f2a5846c | |
| | 49157868f9 | |
| | ae2c18e1cf | |
| | e6819412c5 | |
| | 2a675e0794 | |
| | 0edf1bbacc | |
| | 8609977b52 | |
| | 2d975e4f22 | |
| | 00cbbc97ae | |
| | 7d30c2484b | |
| | 376409b857 | |
| | d4a54a085b | |
| | c1a370649e | |
| | 3cad9d989d | |
| | a50025269f | |
| | a3533c4ea0 | |
| | 3413fc0781 | |
| | dc205a2c5d | |
| | a0a8e8c587 | |
| | c3c80b92c8 | |
| | a8cbec824c | |
| | 33d894c1f0 | |
| | 7942b8fae9 | |
| | b97f957489 | |
| | f3d69e9563 | |
| | 4b36c285f1 | |
| | dbb1ce1a9b | |
| | 3544c9334c | |
| | 492a00969d | |
| | 206666bff6 | |
| | 7453d9779d | |
| | 8e3e0fd528 | |
| | b1e290f959 | |
| | d8dc93fccc | |
| | 3887d207b6 | |
| | e859f0e67d | |
| | ce397ebcc6 | |
| | 26011ed0b6 | |
| | 8087822ab2 | |
10 .editorconfig Normal file
@@ -0,0 +1,10 @@
root = true

[*]
end_of_line = lf
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true

[{Makefile,**.mk}]
indent_style = tab
@@ -21,3 +21,6 @@ GT_GCS_CREDENTIAL_PATH = GCS credential path
GT_GCS_ENDPOINT = GCS end point
# Settings for kafka wal test
GT_KAFKA_ENDPOINTS = localhost:9092

# Setting for fuzz tests
GT_MYSQL_ADDR = localhost:4002
@@ -70,7 +70,7 @@ runs:
- name: Build greptime binary
shell: pwsh
run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }}
run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }} --bin greptime

- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
13 .github/actions/fuzz-test/action.yaml vendored Normal file
@@ -0,0 +1,13 @@
name: Fuzz Test
description: 'Fuzz test given setup and service'
inputs:
target:
description: "The fuzz target to test"
runs:
using: composite
steps:
- name: Run Fuzz Test
shell: bash
run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none -- -max_total_time=120
env:
GT_MYSQL_ADDR: 127.0.0.1:4002
8 .github/pull_request_template.md vendored
@@ -1,8 +1,10 @@
I hereby agree to the terms of the [GreptimeDB CLA](https://gist.github.com/xtang/6378857777706e568c1949c7578592cc)
I hereby agree to the terms of the [GreptimeDB CLA](https://github.com/GreptimeTeam/.github/blob/main/CLA.md).

## Refer to a related PR or issue link (optional)

## What's changed and what's your intention?

_PLEASE DO NOT LEAVE THIS EMPTY !!!_
__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__

Please explain IN DETAIL what the changes are in this PR and why they are needed:

@@ -16,5 +18,3 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
- [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests.
- [x] This PR does not require documentation updates.

## Refer to a related PR or issue link (optional)
46 .github/workflows/develop.yml vendored
@@ -102,7 +102,7 @@ jobs:
shared-key: "build-binaries"
- name: Build greptime binaries
shell: bash
run: cargo build
run: cargo build --bin greptime --bin sqlness-runner
- name: Pack greptime binaries
shell: bash
run: |
@@ -117,6 +117,46 @@ jobs:
artifacts-dir: bins
version: current

fuzztest:
name: Fuzz Test
needs: build
runs-on: ubuntu-latest
strategy:
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table" ]
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Run GreptimeDB
run: |
./bins/greptime standalone start&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
with:
target: ${{ matrix.target }}

sqlness:
name: Sqlness Test
needs: build
@@ -239,6 +279,10 @@ jobs:
with:
# Shares cross multiple jobs
shared-key: "coverage-test"
- name: Docker Cache
uses: ScribeMD/docker-cache@0.3.7
with:
key: docker-${{ runner.os }}-coverage
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov
12 .github/workflows/release.yml vendored
@@ -91,7 +91,7 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.7.0
NEXT_RELEASE_VERSION: v0.8.0

jobs:
allocate-runners:
@@ -221,6 +221,8 @@ jobs:
arch: x86_64-apple-darwin
artifacts-dir-prefix: greptime-darwin-amd64-pyo3
runs-on: ${{ matrix.os }}
outputs:
build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }}
needs: [
allocate-runners,
]
@@ -260,6 +262,8 @@ jobs:
features: pyo3_backend,servers/dashboard
artifacts-dir-prefix: greptime-windows-amd64-pyo3
runs-on: ${{ matrix.os }}
outputs:
build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }}
needs: [
allocate-runners,
]
@@ -284,7 +288,7 @@ jobs:
- name: Set build windows result
id: set-build-windows-result
run: |
echo "build-windows-result=success" >> $GITHUB_OUTPUT
echo "build-windows-result=success" >> $Env:GITHUB_OUTPUT

release-images-to-dockerhub:
name: Build and push images to DockerHub
@@ -295,6 +299,8 @@ jobs:
build-linux-arm64-artifacts,
]
runs-on: ubuntu-2004-16-cores
outputs:
build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
steps:
- uses: actions/checkout@v4
with:
@@ -310,7 +316,7 @@ jobs:
version: ${{ needs.allocate-runners.outputs.version }}

- name: Set build image result
id: set-image-build-result
id: set-build-image-result
run: |
echo "build-image-result=success" >> $GITHUB_OUTPUT
4 .gitignore vendored
@@ -46,3 +46,7 @@ benchmarks/data
*.code-workspace

venv/

# Fuzz tests
tests-fuzz/artifacts/
tests-fuzz/corpus/
414 Cargo.lock generated (file diff suppressed because it is too large)
@@ -62,7 +62,7 @@ members = [
resolver = "2"

[workspace.package]
version = "0.6.0"
version = "0.7.1"
edition = "2021"
license = "Apache-2.0"

@@ -78,7 +78,7 @@ aquamarine = "0.3"
arrow = { version = "47.0" }
arrow-array = "47.0"
arrow-flight = "47.0"
arrow-ipc = "47.0"
arrow-ipc = { version = "47.0", features = ["lz4"] }
arrow-schema = { version = "47.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
@@ -134,7 +134,7 @@ reqwest = { version = "0.11", default-features = false, features = [
rskafka = "0.5"
rust_decimal = "1.33"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.7"
5 Makefile
@@ -3,6 +3,7 @@ CARGO_PROFILE ?=
FEATURES ?=
TARGET_DIR ?=
TARGET ?=
BUILD_BIN ?= greptime
CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
@@ -45,6 +46,10 @@ ifneq ($(strip $(TARGET)),)
CARGO_BUILD_OPTS += --target ${TARGET}
endif

ifneq ($(strip $(BUILD_BIN)),)
CARGO_BUILD_OPTS += --bin ${BUILD_BIN}
endif

ifneq ($(strip $(RELEASE)),)
CARGO_BUILD_OPTS += --release
endif
@@ -29,7 +29,7 @@ use client::api::v1::column::Values;
use client::api::v1::{
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
};
use client::{Client, Database, Output, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use futures_util::TryStreamExt;
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
@@ -502,9 +502,9 @@ async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
for i in 0..num_iter {
let now = Instant::now();
let res = db.sql(&query).await.unwrap();
match res {
Output::AffectedRows(_) | Output::RecordBatches(_) => (),
Output::Stream(stream, _) => {
match res.data {
OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => (),
OutputData::Stream(stream) => {
stream.try_collect::<Vec<_>>().await.unwrap();
}
}
@@ -8,5 +8,6 @@ coverage:
ignore:
- "**/error*.rs" # ignore all error.rs files
- "tests/runner/*.rs" # ignore integration test runner
- "tests-integration/**/*.rs" # ignore integration tests
comment: # this is a top-level key
layout: "diff"
@@ -134,10 +134,22 @@ create_on_compaction = "auto"
|
||||
apply_on_query = "auto"
|
||||
# Memory threshold for performing an external sort during index creation.
|
||||
# Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
|
||||
mem_threshold_on_create = "64MB"
|
||||
mem_threshold_on_create = "64M"
|
||||
# File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
|
||||
intermediate_path = ""
|
||||
|
||||
[region_engine.mito.memtable]
|
||||
# Memtable type.
|
||||
# - "experimental": experimental memtable
|
||||
# - "time_series": time-series memtable (deprecated)
|
||||
type = "experimental"
|
||||
# The max number of keys in one shard.
|
||||
index_max_keys_per_shard = 8192
|
||||
# The max rows of data inside the actively writing buffer in one shard.
|
||||
data_freeze_threshold = 32768
|
||||
# Max dictionary bytes.
|
||||
fork_dictionary_bytes = "1GiB"
|
||||
|
||||
# Log options, see `standalone.example.toml`
|
||||
# [logging]
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
|
||||
@@ -31,6 +31,7 @@ runtime_size = 2
mode = "disable"
cert_path = ""
key_path = ""
watch = false

# PostgresSQL server options, see `standalone.example.toml`.
[postgres]
@@ -43,6 +44,7 @@ runtime_size = 2
mode = "disable"
cert_path = ""
key_path = ""
watch = false

# OpenTSDB protocol options, see `standalone.example.toml`.
[opentsdb]
@@ -44,6 +44,8 @@ mode = "disable"
|
||||
cert_path = ""
|
||||
# Private key file path.
|
||||
key_path = ""
|
||||
# Watch for Certificate and key file change and auto reload
|
||||
watch = false
|
||||
|
||||
# PostgresSQL server options.
|
||||
[postgres]
|
||||
@@ -62,6 +64,8 @@ mode = "disable"
|
||||
cert_path = ""
|
||||
# private key file path.
|
||||
key_path = ""
|
||||
# Watch for Certificate and key file change and auto reload
|
||||
watch = false
|
||||
|
||||
# OpenTSDB protocol options.
|
||||
[opentsdb]
|
||||
@@ -118,7 +122,7 @@ sync_period = "1000ms"
|
||||
# Number of topics to be created upon start.
|
||||
# num_topics = 64
|
||||
# Topic selector type.
|
||||
# Available selector types:
|
||||
# Available selector types:
|
||||
# - "round_robin" (default)
|
||||
# selector_type = "round_robin"
|
||||
# The prefix of topic name.
|
||||
@@ -240,6 +244,18 @@ mem_threshold_on_create = "64M"
|
||||
# File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
|
||||
intermediate_path = ""
|
||||
|
||||
[region_engine.mito.memtable]
|
||||
# Memtable type.
|
||||
# - "experimental": experimental memtable
|
||||
# - "time_series": time-series memtable (deprecated)
|
||||
type = "experimental"
|
||||
# The max number of keys in one shard.
|
||||
index_max_keys_per_shard = 8192
|
||||
# The max rows of data inside the actively writing buffer in one shard.
|
||||
data_freeze_threshold = 32768
|
||||
# Max dictionary bytes.
|
||||
fork_dictionary_bytes = "1GiB"
|
||||
|
||||
# Log options
|
||||
# [logging]
|
||||
# Specify logs directory.
|
||||
@@ -250,10 +266,11 @@ intermediate_path = ""
|
||||
# enable_otlp_tracing = false
|
||||
# tracing exporter endpoint with format `ip:port`, we use grpc oltp as exporter, default endpoint is `localhost:4317`
|
||||
# otlp_endpoint = "localhost:4317"
|
||||
# The percentage of tracing will be sampled and exported. Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
# tracing_sample_ratio = 1.0
|
||||
# Whether to append logs to stdout. Defaults to true.
|
||||
# append_stdout = true
|
||||
# The percentage of tracing will be sampled and exported. Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
# [logging.tracing_sample_ratio]
|
||||
# default_ratio = 0.0
|
||||
|
||||
# Standalone export the metrics generated by itself
|
||||
# encoded to Prometheus remote-write format
|
||||
|
||||
50 docs/benchmarks/tsbs/v0.7.0.md Normal file
@@ -0,0 +1,50 @@
# TSBS benchmark - v0.7.0

## Environment

### Local

| | |
| ------ | ---------------------------------- |
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB |
| Disk | SOLIDIGM SSDPFKNU010TZ |
| OS | Ubuntu 22.04.2 LTS |

### Amazon EC2

| | |
| ------- | -------------- |
| Machine | c5d.2xlarge |
| CPU | 8 core |
| Memory | 16GB |
| Disk | 50GB (GP3) |
| OS | Ubuntu 22.04.1 |

## Write performance

| Environment | Ingest rate (rows/s) |
| ------------------ | --------------------- |
| Local | 3695814.64 |
| EC2 c5d.2xlarge | 2987166.64 |

## Query performance

| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
| --------------------- | ---------- | ---------------------- |
| cpu-max-all-1 | 30.56 | 54.74 |
| cpu-max-all-8 | 52.69 | 70.50 |
| double-groupby-1 | 664.30 | 1366.63 |
| double-groupby-5 | 1391.26 | 2141.71 |
| double-groupby-all | 2828.94 | 3389.59 |
| groupby-orderby-limit | 718.92 | 1213.90 |
| high-cpu-1 | 29.21 | 52.98 |
| high-cpu-all | 5514.12 | 7194.91 |
| lastpoint | 7571.40 | 9423.41 |
| single-groupby-1-1-1 | 19.09 | 7.77 |
| single-groupby-1-1-12 | 27.28 | 51.64 |
| single-groupby-1-8-1 | 31.85 | 11.64 |
| single-groupby-5-1-1 | 16.14 | 9.67 |
| single-groupby-5-1-12 | 27.21 | 53.62 |
| single-groupby-5-8-1 | 39.62 | 14.96 |
@@ -79,7 +79,7 @@ This RFC proposes to add a new expression node `MergeScan` to merge result from
│ │ │ │
└─Frontend──────┘ └─Remote-Sources──────────────┘
```
This merge operation simply chains all the the underlying remote data sources and return `RecordBatch`, just like a coalesce op. And each remote sources is a gRPC query to datanode via the substrait logical plan interface. The plan is transformed and divided from the original query that comes to frontend.
This merge operation simply chains all the underlying remote data sources and return `RecordBatch`, just like a coalesce op. And each remote sources is a gRPC query to datanode via the substrait logical plan interface. The plan is transformed and divided from the original query that comes to frontend.
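To make the coalesce analogy concrete, here is a minimal, self-contained sketch of chaining several batch-producing streams into one. This is only an illustration (not GreptimeDB's actual `MergeScan` code); it assumes the `futures` and `tokio` crates and uses `Vec<i32>` as a stand-in for a `RecordBatch`:

```rust
// Hedged illustration: draining "remote sources" one after another, coalesce-style.
use futures::stream::{self, StreamExt};

#[tokio::main]
async fn main() {
    // Two hypothetical remote sources, each yielding a few batches.
    let source_a = stream::iter(vec![vec![1, 2], vec![3]]);
    let source_b = stream::iter(vec![vec![4, 5]]);

    // Chain them: exhaust source_a first, then source_b, without reordering.
    let merged = source_a.chain(source_b);

    let batches: Vec<Vec<i32>> = merged.collect().await;
    assert_eq!(batches, vec![vec![1, 2], vec![3], vec![4, 5]]);
}
```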
## Commutativity of MergeScan
Binary file not shown. After: 65 KiB image.
101 docs/rfcs/2024-02-21-multi-dimension-partition-rule/rfc.md Normal file
@@ -0,0 +1,101 @@
---
Feature Name: Multi-dimension Partition Rule
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/3351
Date: 2024-02-21
Author: "Ruihang Xia <waynestxia@gmail.com>"
---

# Summary

A new region partition scheme that runs on multiple dimensions of the key space. The partition rule is defined by a set of simple expressions on the partition key columns.

# Motivation

The current partition rule is from MySQL's [`RANGE Partition`](https://dev.mysql.com/doc/refman/8.0/en/partitioning-range.html), which is based on a single dimension. It is sort of a [Hilbert Curve](https://en.wikipedia.org/wiki/Hilbert_curve) and pick several point on the curve to divide the space. It is neither easy to understand how the data get partitioned nor flexible enough to handle complex partitioning requirements.

Considering the future requirements like region repartitioning or autonomous rebalancing, where both workload and partition may change frequently. Here proposes a new region partition scheme that uses a set of simple expressions on the partition key columns to divide the key space.

# Details

## Partition rule

First, we define a simple expression that can be used to define the partition rule. The simple expression is a binary expression expression on the partition key columns that can be evaluated to a boolean value. The binary operator is limited to comparison operators only, like `=`, `!=`, `>`, `>=`, `<`, `<=`. And the operands are limited either literal value or partition column.

Example of valid simple expressions are $`col_A = 10`$, $`col_A \gt 10 \& col_B \gt 20`$ or $`col_A \ne 10`$.

Those expressions can be used as predicates to divide the key space into different regions. The following example have two partition columns `Col A` and `Col B`, and four partitioned regions.

```math
\left\{\begin{aligned}

&col_A \le 10 &Region_1 \\
&10 \lt col_A \& col_A \le 20 &Region_2 \\
&20 \lt col_A \space \& \space col_B \lt 100 &Region_3 \\
&20 \lt col_A \space \& \space col_B \ge 100 &Region_4

\end{aligned}\right\}
```

An advantage of this scheme is that it is easy to understand how the data get partitioned. The above example can be visualized in a 2D space (two partition column is involved in the example).



Here each expression draws a line in the 2D space. Managing data partitioning becomes a matter of drawing lines in the key space.

To make it easy to use, there is a "default region" which catches all the data that doesn't match any of previous expressions. The default region exist by default and do not need to specify. It is also possible to remove this default region if the DB finds it is not necessary.
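As an illustration of the rule above (not part of the RFC or the GreptimeDB codebase), routing a single row under that example rule is just a chain of comparisons, with the default region as the fallback:

```rust
// Hedged sketch: routing one row under the example rule. `col_a` and `col_b`
// mirror the two partition columns from the 2D example.
fn route_region(col_a: i64, col_b: i64) -> &'static str {
    if col_a <= 10 {
        "Region_1"
    } else if col_a > 10 && col_a <= 20 {
        "Region_2"
    } else if col_a > 20 && col_b < 100 {
        "Region_3"
    } else if col_a > 20 && col_b >= 100 {
        "Region_4"
    } else {
        // Unreachable for this particular rule, but this is where the implicit
        // "default region" would catch rows that match no expression.
        "default_region"
    }
}

fn main() {
    assert_eq!(route_region(5, 0), "Region_1");
    assert_eq!(route_region(15, 0), "Region_2");
    assert_eq!(route_region(25, 50), "Region_3");
    assert_eq!(route_region(25, 150), "Region_4");
}
```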
## SQL interface

The SQL interface is in response to two parts: specifying the partition columns and the partition rule. Thouth we are targeting an autonomous system, it's still allowed to give some bootstrap rules or hints on creating table.

Partition column is specified by `PARTITION ON COLUMNS` sub-clause in `CREATE TABLE`:

```sql
CREATE TABLE t (...)
PARTITION ON COLUMNS (...) ();
```

Two following brackets are for partition columns and partition rule respectively.

Columns provided here are only used as an allow-list of how the partition rule can be defined. Which means (a) the sequence between columns doesn't matter, (b) the columns provided here are not necessarily being used in the partition rule.

The partition rule part is a list of comma-separated simple expressions. Expressions here are not corresponding to region, as they might be changed by system to fit various workload.

A full example of `CREATE TABLE` with partition rule is:

```sql
CREATE TABLE IF NOT EXISTS demo (
a STRING,
b STRING,
c STRING,
d STRING,
ts TIMESTAMP,
memory DOUBLE,
TIME INDEX (ts),
PRIMARY KEY (a, b, c, d)
)
PARTITION ON COLUMNS (c, b, a) (
a < 10,
10 >= a AND a < 20,
20 >= a AND b < 100,
20 >= a AND b > 100
)
```

## Combine with storage

Examining columns separately suits our columnar storage very well in two aspects.

1. The simple expression can be pushed down to storage and file format, and is likely to hit existing index. Makes pruning operation very efficient.

2. Columns in columnar storage are not tightly coupled like in the traditional row storages, which means we can easily add or remove columns from partition rule without much impact (like a global reshuffle) on data.

The data file itself can be "projected" to the key space as a polyhedron, it is guaranteed that each plane is in parallel with some coordinate planes (in a 2D scenario, this is saying that all the files can be projected to a rectangle). Thus partition or repartition also only need to consider related columns.



An additional limitation is that considering how the index works and how we organize the primary keys at present, the partition columns are limited to be a subset of primary keys for better performance.
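To sketch why the pushdown in point 1 is cheap (again, an illustration rather than the engine's real pruning code): with per-file min/max statistics for a partition column, a file can be skipped whenever the expression cannot be satisfied anywhere in that range:

```rust
// Hedged sketch: pruning a file against the simple expression `col <= bound`
// using hypothetical per-column min/max statistics.
struct ColumnStats {
    min: i64,
    max: i64,
}

/// Returns true if the file may contain rows satisfying `col <= bound`.
fn may_match_le(stats: &ColumnStats, bound: i64) -> bool {
    stats.min <= bound
}

fn main() {
    let file_a = ColumnStats { min: 0, max: 9 };
    let file_b = ColumnStats { min: 50, max: 80 };

    // Partition expression `col_a <= 10`: file_a must be read, file_b can be pruned.
    assert!(may_match_le(&file_a, 10));
    assert!(!may_match_le(&file_b, 10));
}
```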
# Drawbacks

This is a breaking change.

Binary file not shown. After: 71 KiB image.
@@ -66,7 +66,7 @@
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"graphTooltip": 1,
"id": null,
"links": [],
"liveNow": false,
@@ -2116,7 +2116,7 @@
}
]
},
"unit": "bytes"
"unit": "none"
},
"overrides": []
},
@@ -2126,7 +2126,7 @@
"x": 0,
"y": 61
},
"id": 12,
"id": 17,
"interval": "1s",
"options": {
"legend": {
@@ -2147,8 +2147,8 @@
"uid": "${DS_PROMETHEUS-1}"
},
"disableTextWrap": false,
"editorMode": "builder",
"expr": "histogram_quantile(0.95, sum by(le) (rate(raft_engine_write_size_bucket[$__rate_interval])))",
"editorMode": "code",
"expr": "rate(raft_engine_sync_log_duration_seconds_count[2s])",
"fullMetaSearch": false,
"includeNullMetadata": false,
"instant": false,
@@ -2158,7 +2158,7 @@
"useBackend": false
}
],
"title": "wal write size",
"title": "raft engine sync count",
"type": "timeseries"
},
{
@@ -2378,6 +2378,120 @@
],
"title": "raft engine write duration seconds",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS-1}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "bytes"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 68
},
"id": 12,
"interval": "1s",
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS-1}"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "histogram_quantile(0.95, sum by(le) (rate(raft_engine_write_size_bucket[$__rate_interval])))",
"fullMetaSearch": false,
"includeNullMetadata": false,
"instant": false,
"legendFormat": "req-size-p95",
"range": true,
"refId": "A",
"useBackend": false
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS-1}"
},
"editorMode": "code",
"expr": "rate(raft_engine_write_size_sum[$__rate_interval])",
"hide": false,
"instant": false,
"legendFormat": "throughput",
"range": true,
"refId": "B"
}
],
"title": "wal write size",
"type": "timeseries"
}
],
"refresh": "10s",
@@ -2387,13 +2501,13 @@
"list": []
},
"time": {
"from": "now-3h",
"from": "now-30m",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "GreptimeDB",
"uid": "e7097237-669b-4f8d-b751-13067afbfb68",
"version": 9,
"version": 12,
"weekStart": ""
}
@@ -1,8 +1,7 @@
#!/usr/bin/env bash

# This script is used to download built dashboard assets from the "GreptimeTeam/dashboard" repository.

set -e -x
set -ex

declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd)
declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR})
@@ -20,13 +19,27 @@ else
GITHUB_URL="${GITHUB_PROXY_URL%/}"
fi

function retry_fetch() {
local url=$1
local filename=$2

curl --connect-timeout 10 --retry 3 -fsSL $url --output $filename || {
echo "Failed to download $url"
echo "You may try to set http_proxy and https_proxy environment variables."
if [[ -z "$GITHUB_PROXY_URL" ]]; then
echo "You may try to set GITHUB_PROXY_URL=http://mirror.ghproxy.com/"
fi
exit 1
}
}

# Download the SHA256 checksum attached to the release. To verify the integrity
# of the download, this checksum will be used to check the download tar file
# containing the built dashboard assets.
curl -Ls ${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/sha256.txt --output sha256.txt
retry_fetch "${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/${RELEASE_VERSION}/sha256.txt" sha256.txt

# Download the tar file containing the built dashboard assets.
curl -L ${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/build.tar.gz --output build.tar.gz
retry_fetch "${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/build.tar.gz" build.tar.gz

# Verify the checksums match; exit if they don't.
case "$(uname -s)" in
@@ -251,6 +251,12 @@ pub enum Error {
source: common_meta::error::Error,
location: Location,
},

#[snafu(display("Get null from table cache, key: {}", key))]
TableCacheNotGet { key: String, location: Location },

#[snafu(display("Failed to get table cache, err: {}", err_msg))]
GetTableCache { err_msg: String },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -311,6 +317,7 @@ impl ErrorExt for Error {
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
Error::TableMetadataManager { source, .. } => source.status_code(),
Error::TableCacheNotGet { .. } | Error::GetTableCache { .. } => StatusCode::Internal,
}
}
@@ -19,9 +19,9 @@ mod partitions;
mod predicate;
mod region_peers;
mod runtime_metrics;
mod schemata;
pub mod schemata;
mod table_names;
mod tables;
pub mod tables;

use std::collections::HashMap;
use std::sync::{Arc, Weak};

@@ -37,8 +37,8 @@ use crate::error::{
use crate::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;

const CATALOG_NAME: &str = "catalog_name";
const SCHEMA_NAME: &str = "schema_name";
pub const CATALOG_NAME: &str = "catalog_name";
pub const SCHEMA_NAME: &str = "schema_name";
const DEFAULT_CHARACTER_SET_NAME: &str = "default_character_set_name";
const DEFAULT_COLLATION_NAME: &str = "default_collation_name";
const INIT_CAPACITY: usize = 42;

@@ -39,10 +39,10 @@ use crate::error::{
use crate::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;

const TABLE_CATALOG: &str = "table_catalog";
const TABLE_SCHEMA: &str = "table_schema";
const TABLE_NAME: &str = "table_name";
const TABLE_TYPE: &str = "table_type";
pub const TABLE_CATALOG: &str = "table_catalog";
pub const TABLE_SCHEMA: &str = "table_schema";
pub const TABLE_NAME: &str = "table_name";
pub const TABLE_TYPE: &str = "table_type";
const TABLE_ID: &str = "table_id";
const ENGINE: &str = "engine";
const INIT_CAPACITY: usize = 42;
@@ -82,12 +82,10 @@ impl CachedMetaKvBackendBuilder {
let cache_ttl = self.cache_ttl.unwrap_or(DEFAULT_CACHE_TTL);
let cache_tti = self.cache_tti.unwrap_or(DEFAULT_CACHE_TTI);

let cache = Arc::new(
CacheBuilder::new(cache_max_capacity)
.time_to_live(cache_ttl)
.time_to_idle(cache_tti)
.build(),
);
let cache = CacheBuilder::new(cache_max_capacity)
.time_to_live(cache_ttl)
.time_to_idle(cache_tti)
.build();

let kv_backend = Arc::new(MetaKvBackend {
client: self.meta_client,
@@ -104,7 +102,7 @@ impl CachedMetaKvBackendBuilder {
}
}

pub type CacheBackendRef = Arc<Cache<Vec<u8>, KeyValue>>;
pub type CacheBackend = Cache<Vec<u8>, KeyValue>;

/// A wrapper of `MetaKvBackend` with cache support.
///
@@ -117,7 +115,7 @@ pub type CacheBackendRef = Arc<Cache<Vec<u8>, KeyValue>>;
/// TTL and TTI for cache.
pub struct CachedMetaKvBackend {
kv_backend: KvBackendRef,
cache: CacheBackendRef,
cache: CacheBackend,
name: String,
version: AtomicUsize,
}
@@ -317,12 +315,10 @@ impl CachedMetaKvBackend {
// only for test
#[cfg(test)]
fn wrap(kv_backend: KvBackendRef) -> Self {
let cache = Arc::new(
CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build(),
);
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build();

let name = format!("CachedKvBackend({})", kv_backend.name());
Self {
@@ -333,7 +329,7 @@ impl CachedMetaKvBackend {
}
}

pub fn cache(&self) -> &CacheBackendRef {
pub fn cache(&self) -> &CacheBackend {
&self.cache
}
@@ -15,9 +15,13 @@
use std::any::Any;
use std::collections::BTreeSet;
use std::sync::{Arc, Weak};
use std::time::Duration;

use async_stream::try_stream;
use common_catalog::consts::{DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID};
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
};
use common_catalog::format_full_table_name;
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::{CacheInvalidator, CacheInvalidatorRef, Context};
use common_meta::error::Result as MetaResult;
@@ -30,6 +34,7 @@ use common_meta::kv_backend::KvBackendRef;
use common_meta::table_name::TableName;
use futures_util::stream::BoxStream;
use futures_util::{StreamExt, TryStreamExt};
use moka::future::{Cache as AsyncCache, CacheBuilder};
use moka::sync::Cache;
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
use snafu::prelude::*;
@@ -38,9 +43,10 @@ use table::metadata::TableId;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use table::TableRef;

use crate::error::Error::{GetTableCache, TableCacheNotGet};
use crate::error::{
self as catalog_err, ListCatalogsSnafu, ListSchemasSnafu, ListTablesSnafu,
Result as CatalogResult, TableMetadataManagerSnafu,
Result as CatalogResult, TableCacheNotGetSnafu, TableMetadataManagerSnafu,
};
use crate::information_schema::InformationSchemaProvider;
use crate::CatalogManager;
@@ -60,6 +66,7 @@ pub struct KvBackendCatalogManager {
table_metadata_manager: TableMetadataManagerRef,
/// A sub-CatalogManager that handles system tables
system_catalog: SystemCatalog,
table_cache: AsyncCache<String, TableRef>,
}

fn make_table(table_info_value: TableInfoValue) -> CatalogResult<TableRef> {
@@ -79,13 +86,24 @@ impl CacheInvalidator for KvBackendCatalogManager {
}

async fn invalidate_table_name(&self, ctx: &Context, table_name: TableName) -> MetaResult<()> {
let table_cache_key = format_full_table_name(
&table_name.catalog_name,
&table_name.schema_name,
&table_name.table_name,
);
self.cache_invalidator
.invalidate_table_name(ctx, table_name)
.await
.await?;
self.table_cache.invalidate(&table_cache_key).await;

Ok(())
}
}

const DEFAULT_CACHED_CATALOG: u64 = 128;
const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;
const TABLE_CACHE_MAX_CAPACITY: u64 = 65536;
const TABLE_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
const TABLE_CACHE_TTI: Duration = Duration::from_secs(5 * 60);

impl KvBackendCatalogManager {
pub fn new(backend: KvBackendRef, cache_invalidator: CacheInvalidatorRef) -> Arc<Self> {
@@ -95,13 +113,16 @@ impl KvBackendCatalogManager {
cache_invalidator,
system_catalog: SystemCatalog {
catalog_manager: me.clone(),
catalog_cache: Cache::new(DEFAULT_CACHED_CATALOG),
catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
information_schema_provider: Arc::new(InformationSchemaProvider::new(
// The catalog name is not used in system_catalog, so let it empty
String::default(),
DEFAULT_CATALOG_NAME.to_string(),
me.clone(),
)),
},
table_cache: CacheBuilder::new(TABLE_CACHE_MAX_CAPACITY)
.time_to_live(TABLE_CACHE_TTL)
.time_to_idle(TABLE_CACHE_TTI)
.build(),
})
}

@@ -216,29 +237,52 @@ impl CatalogManager for KvBackendCatalogManager {
return Ok(Some(table));
}

let key = TableNameKey::new(catalog, schema, table_name);
let Some(table_name_value) = self
.table_metadata_manager
.table_name_manager()
.get(key)
.await
.context(TableMetadataManagerSnafu)?
else {
return Ok(None);
};
let table_id = table_name_value.table_id();
let init = async {
let table_name_key = TableNameKey::new(catalog, schema, table_name);
let Some(table_name_value) = self
.table_metadata_manager
.table_name_manager()
.get(table_name_key)
.await
.context(TableMetadataManagerSnafu)?
else {
return TableCacheNotGetSnafu {
key: table_name_key.to_string(),
}
.fail();
};
let table_id = table_name_value.table_id();

let Some(table_info_value) = self
.table_metadata_manager
.table_info_manager()
.get(table_id)
.await
.context(TableMetadataManagerSnafu)?
.map(|v| v.into_inner())
else {
return Ok(None);
let Some(table_info_value) = self
.table_metadata_manager
.table_info_manager()
.get(table_id)
.await
.context(TableMetadataManagerSnafu)?
.map(|v| v.into_inner())
else {
return TableCacheNotGetSnafu {
key: table_name_key.to_string(),
}
.fail();
};
make_table(table_info_value)
};
make_table(table_info_value).map(Some)

match self
.table_cache
.try_get_with_by_ref(&format_full_table_name(catalog, schema, table_name), init)
.await
{
Ok(table) => Ok(Some(table)),
Err(err) => match err.as_ref() {
TableCacheNotGet { .. } => Ok(None),
_ => Err(err),
},
}
.map_err(|err| GetTableCache {
err_msg: err.to_string(),
})
}

async fn tables<'a>(
@@ -307,7 +307,7 @@ impl Database {
reason: "Expect 'AffectedRows' Flight messages to be the one and the only!"
}
);
Ok(Output::AffectedRows(rows))
Ok(Output::new_with_affected_rows(rows))
}
FlightMessage::Recordbatch(_) | FlightMessage::Metrics(_) => {
IllegalFlightMessagesSnafu {
@@ -340,7 +340,7 @@ impl Database {
output_ordering: None,
metrics: Default::default(),
};
Ok(Output::new_stream(Box::pin(record_batch_stream)))
Ok(Output::new_with_stream(Box::pin(record_batch_stream)))
}
}
}
@@ -26,7 +26,7 @@ use api::v1::greptime_response::Response;
use api::v1::{AffectedRows, GreptimeResponse};
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::status_code::StatusCode;
pub use common_query::Output;
pub use common_query::{Output, OutputData, OutputMeta};
pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
use snafu::OptionExt;
@@ -197,7 +197,7 @@ impl RegionRequester {

check_response_header(header)?;

Ok(affected_rows)
Ok(affected_rows as _)
}

pub async fn handle(&self, request: RegionRequest) -> Result<AffectedRows> {
@@ -62,7 +62,9 @@ pub struct BenchTableMetadataCommand {

impl BenchTableMetadataCommand {
pub async fn build(&self) -> Result<Instance> {
let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr]).await.unwrap();
let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr], 128)
.await
.unwrap();

let table_metadata_manager = Arc::new(TableMetadataManager::new(etcd_store));
@@ -19,8 +19,7 @@ use async_trait::async_trait;
use clap::{Parser, ValueEnum};
use client::api::v1::auth_header::AuthScheme;
use client::api::v1::Basic;
use client::{Client, Database, DEFAULT_SCHEMA_NAME};
use common_query::Output;
use client::{Client, Database, OutputData, DEFAULT_SCHEMA_NAME};
use common_recordbatch::util::collect;
use common_telemetry::{debug, error, info, warn};
use datatypes::scalars::ScalarVector;
@@ -142,7 +141,7 @@ impl Export {
.with_context(|_| RequestDatabaseSnafu {
sql: "show databases".to_string(),
})?;
let Output::Stream(stream, _) = result else {
let OutputData::Stream(stream) = result.data else {
NotDataFromOutputSnafu.fail()?
};
let record_batch = collect(stream)
@@ -183,7 +182,7 @@ impl Export {
.sql(&sql)
.await
.with_context(|_| RequestDatabaseSnafu { sql })?;
let Output::Stream(stream, _) = result else {
let OutputData::Stream(stream) = result.data else {
NotDataFromOutputSnafu.fail()?
};
let Some(record_batch) = collect(stream)
@@ -235,7 +234,7 @@ impl Export {
.sql(&sql)
.await
.with_context(|_| RequestDatabaseSnafu { sql })?;
let Output::Stream(stream, _) = result else {
let OutputData::Stream(stream) = result.data else {
NotDataFromOutputSnafu.fail()?
};
let record_batch = collect(stream)
@@ -19,7 +19,7 @@ use std::time::Instant;
use catalog::kvbackend::{
CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager,
};
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_base::Plugins;
use common_error::ext::ErrorExt;
use common_query::Output;
@@ -184,15 +184,15 @@ impl Repl {
}
.context(RequestDatabaseSnafu { sql: &sql })?;

let either = match output {
Output::Stream(s, _) => {
let either = match output.data {
OutputData::Stream(s) => {
let x = RecordBatches::try_collect(s)
.await
.context(CollectRecordBatchesSnafu)?;
Either::Left(x)
}
Output::RecordBatches(x) => Either::Left(x),
Output::AffectedRows(rows) => Either::Right(rows),
OutputData::RecordBatches(x) => Either::Left(x),
OutputData::AffectedRows(rows) => Either::Right(rows),
};

let end = Instant::now();
@@ -70,7 +70,7 @@ impl UpgradeCommand {
etcd_addr: &self.etcd_addr,
})?;
let tool = MigrateTableMetadata {
etcd_store: EtcdStore::with_etcd_client(client),
etcd_store: EtcdStore::with_etcd_client(client, 128),
dryrun: self.dryrun,
skip_catalog_keys: self.skip_catalog_keys,
skip_table_global_keys: self.skip_table_global_keys,
@@ -117,10 +117,12 @@ struct StartCommand {
/// The working home directory of this metasrv instance.
#[clap(long)]
data_home: Option<String>,

/// If it's not empty, the metasrv will store all data with this key prefix.
#[clap(long, default_value = "")]
store_key_prefix: String,
/// The max operations per txn
#[clap(long)]
max_txn_ops: Option<usize>,
}

impl StartCommand {
@@ -181,6 +183,10 @@ impl StartCommand {
opts.store_key_prefix = self.store_key_prefix.clone()
}

if let Some(max_txn_ops) = self.max_txn_ops {
opts.max_txn_ops = max_txn_ops;
}

// Disable dashboard in metasrv.
opts.http.disable_dashboard = true;
@@ -21,6 +21,8 @@ pub mod readable_size;
use core::any::Any;
use std::sync::{Arc, Mutex, MutexGuard};

pub type AffectedRows = usize;

pub use bit_vec::BitVec;

/// [`Plugins`] is a wrapper of Arc contents.
@@ -28,12 +28,15 @@ const REGION: &str = "region";
|
||||
const ENABLE_VIRTUAL_HOST_STYLE: &str = "enable_virtual_host_style";
|
||||
|
||||
pub fn is_supported_in_s3(key: &str) -> bool {
|
||||
key == ENDPOINT
|
||||
|| key == ACCESS_KEY_ID
|
||||
|| key == SECRET_ACCESS_KEY
|
||||
|| key == SESSION_TOKEN
|
||||
|| key == REGION
|
||||
|| key == ENABLE_VIRTUAL_HOST_STYLE
|
||||
[
|
||||
ENDPOINT,
|
||||
ACCESS_KEY_ID,
|
||||
SECRET_ACCESS_KEY,
|
||||
SESSION_TOKEN,
|
||||
REGION,
|
||||
ENABLE_VIRTUAL_HOST_STYLE,
|
||||
]
|
||||
.contains(&key)
|
||||
}
|
||||
|
||||
pub fn build_s3_backend(
|
||||
|
||||
@@ -12,6 +12,7 @@ api.workspace = true
arc-swap = "1.0"
async-trait.workspace = true
chrono-tz = "0.6"
common-base.workspace = true
common-catalog.workspace = true
common-error.workspace = true
common-macro.workspace = true
@@ -33,6 +34,7 @@ serde_json.workspace = true
session.workspace = true
snafu.workspace = true
statrs = "0.16"
store-api.workspace = true
table.workspace = true

[dev-dependencies]
@@ -15,12 +15,12 @@
use std::sync::Arc;

use async_trait::async_trait;
use common_base::AffectedRows;
use common_meta::rpc::procedure::{MigrateRegionRequest, ProcedureStateResponse};
use common_query::error::Result;
use session::context::QueryContextRef;
use table::requests::{DeleteRequest, InsertRequest};

pub type AffectedRows = usize;
use store_api::storage::RegionId;
use table::requests::{CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest};

/// A trait for handling table mutations in `QueryEngine`.
#[async_trait]
@@ -30,6 +30,28 @@ pub trait TableMutationHandler: Send + Sync {

/// Delete rows from the table.
async fn delete(&self, request: DeleteRequest, ctx: QueryContextRef) -> Result<AffectedRows>;

/// Trigger a flush task for table.
async fn flush(&self, request: FlushTableRequest, ctx: QueryContextRef)
-> Result<AffectedRows>;

/// Trigger a compaction task for table.
async fn compact(
&self,
request: CompactTableRequest,
ctx: QueryContextRef,
) -> Result<AffectedRows>;

/// Trigger a flush task for a table region.
async fn flush_region(&self, region_id: RegionId, ctx: QueryContextRef)
-> Result<AffectedRows>;

/// Trigger a compaction task for a table region.
async fn compact_region(
&self,
region_id: RegionId,
ctx: QueryContextRef,
) -> Result<AffectedRows>;
}

/// A trait for handling procedure service requests in `QueryEngine`.
@@ -12,8 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use common_query::error::{InvalidInputTypeSnafu, Result};
use common_query::prelude::{Signature, TypeSignature, Volatility};
use datatypes::prelude::ConcreteDataType;
use datatypes::types::cast::cast;
use datatypes::value::ValueRef;
use snafu::ResultExt;

/// Create a function signature with oneof signatures of interleaving two arguments.
pub fn one_of_sigs2(args1: Vec<ConcreteDataType>, args2: Vec<ConcreteDataType>) -> Signature {
@@ -27,3 +31,15 @@ pub fn one_of_sigs2(args1: Vec<ConcreteDataType>, args2: Vec<ConcreteDataType>)

Signature::one_of(sigs, Volatility::Immutable)
}

/// Cast a [`ValueRef`] to u64, returns `None` if fails
pub fn cast_u64(value: &ValueRef) -> Result<Option<u64>> {
cast((*value).into(), &ConcreteDataType::uint64_datatype())
.context(InvalidInputTypeSnafu {
err_msg: format!(
"Failed to cast input into uint64, actual type: {:#?}",
value.data_type(),
),
})
.map(|v| v.as_u64())
}
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod clamp;
mod modulo;
mod pow;
mod rate;
@@ -19,6 +20,7 @@ mod rate;
use std::fmt;
use std::sync::Arc;

pub use clamp::ClampFunction;
use common_query::error::{GeneralDataFusionSnafu, Result};
use common_query::prelude::Signature;
use datafusion::error::DataFusionError;
@@ -40,7 +42,8 @@ impl MathFunction {
registry.register(Arc::new(ModuloFunction));
registry.register(Arc::new(PowFunction));
registry.register(Arc::new(RateFunction));
registry.register(Arc::new(RangeFunction))
registry.register(Arc::new(RangeFunction));
registry.register(Arc::new(ClampFunction));
}
}
403 src/common/function/src/scalars/math/clamp.rs Normal file
@@ -0,0 +1,403 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::{self, Display};
use std::sync::Arc;

use common_query::error::{InvalidFuncArgsSnafu, Result};
use common_query::prelude::Signature;
use datafusion::arrow::array::{ArrayIter, PrimitiveArray};
use datafusion::logical_expr::Volatility;
use datatypes::data_type::{ConcreteDataType, DataType};
use datatypes::prelude::VectorRef;
use datatypes::types::LogicalPrimitiveType;
use datatypes::value::TryAsPrimitive;
use datatypes::vectors::PrimitiveVector;
use datatypes::with_match_primitive_type_id;
use snafu::{ensure, OptionExt};

use crate::function::Function;

#[derive(Clone, Debug, Default)]
pub struct ClampFunction;

const CLAMP_NAME: &str = "clamp";

impl Function for ClampFunction {
fn name(&self) -> &str {
CLAMP_NAME
}

fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
// Type check is done by `signature`
Ok(input_types[0].clone())
}

fn signature(&self) -> Signature {
// input, min, max
Signature::uniform(3, ConcreteDataType::numerics(), Volatility::Immutable)
}

fn eval(
&self,
_func_ctx: crate::function::FunctionContext,
columns: &[VectorRef],
) -> Result<VectorRef> {
ensure!(
columns.len() == 3,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly 3, have: {}",
columns.len()
),
}
);
ensure!(
columns[0].data_type().is_numeric(),
InvalidFuncArgsSnafu {
err_msg: format!(
"The first arg's type is not numeric, have: {}",
columns[0].data_type()
),
}
);
ensure!(
columns[0].data_type() == columns[1].data_type()
&& columns[1].data_type() == columns[2].data_type(),
InvalidFuncArgsSnafu {
err_msg: format!(
"Arguments don't have identical types: {}, {}, {}",
columns[0].data_type(),
columns[1].data_type(),
columns[2].data_type()
),
}
);
ensure!(
columns[1].len() == 1 && columns[2].len() == 1,
InvalidFuncArgsSnafu {
err_msg: format!(
"The second and third args should be scalar, have: {:?}, {:?}",
columns[1], columns[2]
),
}
);

with_match_primitive_type_id!(columns[0].data_type().logical_type_id(), |$S| {
let input_array = columns[0].to_arrow_array();
let input = input_array
.as_any()
.downcast_ref::<PrimitiveArray<<$S as LogicalPrimitiveType>::ArrowPrimitive>>()
.unwrap();

let min = TryAsPrimitive::<$S>::try_as_primitive(&columns[1].get(0))
.with_context(|| {
InvalidFuncArgsSnafu {
err_msg: "The second arg should not be none",
}
})?;
let max = TryAsPrimitive::<$S>::try_as_primitive(&columns[2].get(0))
.with_context(|| {
InvalidFuncArgsSnafu {
err_msg: "The third arg should not be none",
}
})?;

// ensure min <= max
ensure!(
min <= max,
InvalidFuncArgsSnafu {
err_msg: format!(
"The second arg should be less than or equal to the third arg, have: {:?}, {:?}",
columns[1], columns[2]
),
}
);

clamp_impl::<$S, true, true>(input, min, max)
},{
unreachable!()
})
}
}

impl Display for ClampFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", CLAMP_NAME.to_ascii_uppercase())
}
}

fn clamp_impl<T: LogicalPrimitiveType, const CLAMP_MIN: bool, const CLAMP_MAX: bool>(
input: &PrimitiveArray<T::ArrowPrimitive>,
min: T::Native,
max: T::Native,
) -> Result<VectorRef> {
common_telemetry::info!("[DEBUG] min {min:?}, max {max:?}");

let iter = ArrayIter::new(input);
let result = iter.map(|x| {
x.map(|x| {
if CLAMP_MIN && x < min {
min
} else if CLAMP_MAX && x > max {
max
} else {
x
}
})
});
let result = PrimitiveArray::<T::ArrowPrimitive>::from_iter(result);
Ok(Arc::new(PrimitiveVector::<T>::from(result)))
}

#[cfg(test)]
mod test {

use std::sync::Arc;

use datatypes::prelude::ScalarVector;
use datatypes::vectors::{
ConstantVector, Float64Vector, Int64Vector, StringVector, UInt64Vector,
};

use super::*;
use crate::function::FunctionContext;

#[test]
fn clamp_i64() {
let inputs = [
(
vec![Some(-3), Some(-2), Some(-1), Some(0), Some(1), Some(2)],
-1,
10,
vec![Some(-1), Some(-1), Some(-1), Some(0), Some(1), Some(2)],
),
(
vec![Some(-3), Some(-2), Some(-1), Some(0), Some(1), Some(2)],
0,
0,
vec![Some(0), Some(0), Some(0), Some(0), Some(0), Some(0)],
),
(
vec![Some(-3), None, Some(-1), None, None, Some(2)],
-2,
1,
vec![Some(-2), None, Some(-1), None, None, Some(1)],
),
(
vec![None, None, None, None, None],
0,
1,
vec![None, None, None, None, None],
),
];

let func = ClampFunction;
for (in_data, min, max, expected) in inputs {
let args = [
Arc::new(Int64Vector::from(in_data)) as _,
Arc::new(Int64Vector::from_vec(vec![min])) as _,
Arc::new(Int64Vector::from_vec(vec![max])) as _,
];
let result = func
.eval(FunctionContext::default(), args.as_slice())
.unwrap();
let expected: VectorRef = Arc::new(Int64Vector::from(expected));
assert_eq!(expected, result);
}
}

#[test]
fn clamp_u64() {
let inputs = [
(
vec![Some(0), Some(1), Some(2), Some(3), Some(4), Some(5)],
1,
3,
vec![Some(1), Some(1), Some(2), Some(3), Some(3), Some(3)],
),
(
vec![Some(0), Some(1), Some(2), Some(3), Some(4), Some(5)],
0,
0,
vec![Some(0), Some(0), Some(0), Some(0), Some(0), Some(0)],
),
(
vec![Some(0), None, Some(2), None, None, Some(5)],
1,
3,
vec![Some(1), None, Some(2), None, None, Some(3)],
),
(
vec![None, None, None, None, None],
0,
1,
vec![None, None, None, None, None],
),
];

let func = ClampFunction;
for (in_data, min, max, expected) in inputs {
let args = [
Arc::new(UInt64Vector::from(in_data)) as _,
Arc::new(UInt64Vector::from_vec(vec![min])) as _,
Arc::new(UInt64Vector::from_vec(vec![max])) as _,
];
let result = func
.eval(FunctionContext::default(), args.as_slice())
.unwrap();
let expected: VectorRef = Arc::new(UInt64Vector::from(expected));
assert_eq!(expected, result);
}
}

#[test]
fn clamp_f64() {
let inputs = [
(
vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)],
-1.0,
10.0,
vec![Some(-1.0), Some(-1.0), Some(-1.0), Some(0.0), Some(1.0)],
),
(
vec![Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)],
0.0,
0.0,
vec![Some(0.0), Some(0.0), Some(0.0), Some(0.0)],
),
(
vec![Some(-3.0), None, Some(-1.0), None, None, Some(2.0)],
|
||||
-2.0,
|
||||
1.0,
|
||||
vec![Some(-2.0), None, Some(-1.0), None, None, Some(1.0)],
|
||||
),
|
||||
(
|
||||
vec![None, None, None, None, None],
|
||||
0.0,
|
||||
1.0,
|
||||
vec![None, None, None, None, None],
|
||||
),
|
||||
];
|
||||
|
||||
let func = ClampFunction;
|
||||
for (in_data, min, max, expected) in inputs {
|
||||
let args = [
|
||||
Arc::new(Float64Vector::from(in_data)) as _,
|
||||
Arc::new(Float64Vector::from_vec(vec![min])) as _,
|
||||
Arc::new(Float64Vector::from_vec(vec![max])) as _,
|
||||
];
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), args.as_slice())
|
||||
.unwrap();
|
||||
let expected: VectorRef = Arc::new(Float64Vector::from(expected));
|
||||
assert_eq!(expected, result);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn clamp_const_i32() {
|
||||
let input = vec![Some(5)];
|
||||
let min = 2;
|
||||
let max = 4;
|
||||
|
||||
let func = ClampFunction;
|
||||
let args = [
|
||||
Arc::new(ConstantVector::new(Arc::new(Int64Vector::from(input)), 1)) as _,
|
||||
Arc::new(Int64Vector::from_vec(vec![min])) as _,
|
||||
Arc::new(Int64Vector::from_vec(vec![max])) as _,
|
||||
];
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), args.as_slice())
|
||||
.unwrap();
|
||||
let expected: VectorRef = Arc::new(Int64Vector::from(vec![Some(4)]));
|
||||
assert_eq!(expected, result);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn clamp_invalid_min_max() {
|
||||
let input = vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)];
|
||||
let min = 10.0;
|
||||
let max = -1.0;
|
||||
|
||||
let func = ClampFunction;
|
||||
let args = [
|
||||
Arc::new(Float64Vector::from(input)) as _,
|
||||
Arc::new(Float64Vector::from_vec(vec![min])) as _,
|
||||
Arc::new(Float64Vector::from_vec(vec![max])) as _,
|
||||
];
|
||||
let result = func.eval(FunctionContext::default(), args.as_slice());
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn clamp_type_not_match() {
|
||||
let input = vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)];
|
||||
let min = -1;
|
||||
let max = 10;
|
||||
|
||||
let func = ClampFunction;
|
||||
let args = [
|
||||
Arc::new(Float64Vector::from(input)) as _,
|
||||
Arc::new(Int64Vector::from_vec(vec![min])) as _,
|
||||
Arc::new(UInt64Vector::from_vec(vec![max])) as _,
|
||||
];
|
||||
let result = func.eval(FunctionContext::default(), args.as_slice());
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn clamp_min_is_not_scalar() {
|
||||
let input = vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)];
|
||||
let min = -10.0;
|
||||
let max = 1.0;
|
||||
|
||||
let func = ClampFunction;
|
||||
let args = [
|
||||
Arc::new(Float64Vector::from(input)) as _,
|
||||
Arc::new(Float64Vector::from_vec(vec![min, min])) as _,
|
||||
Arc::new(Float64Vector::from_vec(vec![max])) as _,
|
||||
];
|
||||
let result = func.eval(FunctionContext::default(), args.as_slice());
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn clamp_no_max() {
|
||||
let input = vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)];
|
||||
let min = -10.0;
|
||||
|
||||
let func = ClampFunction;
|
||||
let args = [
|
||||
Arc::new(Float64Vector::from(input)) as _,
|
||||
Arc::new(Float64Vector::from_vec(vec![min])) as _,
|
||||
];
|
||||
let result = func.eval(FunctionContext::default(), args.as_slice());
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn clamp_on_string() {
|
||||
let input = vec![Some("foo"), Some("foo"), Some("foo"), Some("foo")];
|
||||
|
||||
let func = ClampFunction;
|
||||
let args = [
|
||||
Arc::new(StringVector::from(input)) as _,
|
||||
Arc::new(StringVector::from_vec(vec!["bar"])) as _,
|
||||
Arc::new(StringVector::from_vec(vec!["baz"])) as _,
|
||||
];
|
||||
let result = func.eval(FunctionContext::default(), args.as_slice());
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
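For readers skimming the clamp change above, the following is a minimal standalone sketch of the same const-generic clamping kernel, written directly against an arrow PrimitiveArray. The arrow dependency and the helper name clamp_array are illustrative assumptions for this sketch, not part of the changeset.

use arrow::array::{Int64Array, PrimitiveArray};
use arrow::datatypes::Int64Type;

// Clamp every non-null element of `input` into [min, max]; nulls pass through.
// The const generics let one kernel skip the lower or upper bound at compile
// time, mirroring `clamp_impl::<T, CLAMP_MIN, CLAMP_MAX>` in the diff above.
fn clamp_array<const CLAMP_MIN: bool, const CLAMP_MAX: bool>(
    input: &PrimitiveArray<Int64Type>,
    min: i64,
    max: i64,
) -> PrimitiveArray<Int64Type> {
    input
        .iter()
        .map(|v| {
            v.map(|x| {
                if CLAMP_MIN && x < min {
                    min
                } else if CLAMP_MAX && x > max {
                    max
                } else {
                    x
                }
            })
        })
        .collect()
}

fn main() {
    let input = Int64Array::from(vec![Some(-3), None, Some(0), Some(7)]);
    let clamped = clamp_array::<true, true>(&input, -1, 5);
    assert_eq!(clamped, Int64Array::from(vec![Some(-1), None, Some(0), Some(5)]));
}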
@@ -128,7 +128,7 @@ mod tests {
         ];
         let result = function.eval(FunctionContext::default(), &args).unwrap();
         assert_eq!(result.len(), 4);
-        for i in 0..3 {
+        for i in 0..4 {
             let p: i64 = (nums[i] % divs[i]) as i64;
             assert!(matches!(result.get(i), Value::Int64(v) if v == p));
         }
@@ -160,7 +160,7 @@ mod tests {
         ];
         let result = function.eval(FunctionContext::default(), &args).unwrap();
         assert_eq!(result.len(), 4);
-        for i in 0..3 {
+        for i in 0..4 {
             let p: u64 = (nums[i] % divs[i]) as u64;
             assert!(matches!(result.get(i), Value::UInt64(v) if v == p));
         }
@@ -192,7 +192,7 @@ mod tests {
         ];
         let result = function.eval(FunctionContext::default(), &args).unwrap();
         assert_eq!(result.len(), 4);
-        for i in 0..3 {
+        for i in 0..4 {
             let p: f64 = nums[i] % divs[i];
             assert!(matches!(result.get(i), Value::Float64(v) if v == p));
         }
@@ -14,9 +14,11 @@
 use std::sync::Arc;

 mod greatest;
 mod to_timezone;
 mod to_unixtime;

 use greatest::GreatestFunction;
 use to_timezone::ToTimezoneFunction;
 use to_unixtime::ToUnixtimeFunction;

 use crate::function_registry::FunctionRegistry;
@@ -25,6 +27,7 @@ pub(crate) struct TimestampFunction;

 impl TimestampFunction {
     pub fn register(registry: &FunctionRegistry) {
         registry.register(Arc::new(ToTimezoneFunction));
         registry.register(Arc::new(ToUnixtimeFunction));
         registry.register(Arc::new(GreatestFunction));
     }
src/common/function/src/scalars/timestamp/to_timezone.rs (new file, 260 lines)
@@ -0,0 +1,260 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
|
||||
use common_query::prelude::Signature;
|
||||
use common_time::{Timestamp, Timezone};
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::prelude::VectorRef;
|
||||
use datatypes::types::TimestampType;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{
|
||||
StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
|
||||
TimestampNanosecondVector, TimestampSecondVector, Vector,
|
||||
};
|
||||
use snafu::{ensure, OptionExt};
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct ToTimezoneFunction;
|
||||
|
||||
const NAME: &str = "to_timezone";
|
||||
|
||||
fn convert_to_timezone(arg: &str) -> Option<Timezone> {
|
||||
Timezone::from_tz_string(arg).ok()
|
||||
}
|
||||
|
||||
fn convert_to_timestamp(arg: &Value) -> Option<Timestamp> {
|
||||
match arg {
|
||||
Value::Timestamp(ts) => Some(*ts),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ToTimezoneFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "TO_TIMEZONE")
|
||||
}
|
||||
}
|
||||
|
||||
impl Function for ToTimezoneFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
// type checked by signature - MUST BE timestamp
|
||||
Ok(input_types[0].clone())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
helper::one_of_sigs2(
|
||||
vec![
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ConcreteDataType::timestamp_nanosecond_datatype(),
|
||||
],
|
||||
vec![ConcreteDataType::string_datatype()],
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly 2, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
// TODO: maybe support epoch timestamp? https://github.com/GreptimeTeam/greptimedb/issues/3477
|
||||
let ts = columns[0].data_type().as_timestamp().with_context(|| {
|
||||
UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
})?;
|
||||
let array = columns[0].to_arrow_array();
|
||||
let times = match ts {
|
||||
TimestampType::Second(_) => {
|
||||
let vector = TimestampSecondVector::try_from_arrow_array(array).unwrap();
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_timestamp(&vector.get(i)))
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
TimestampType::Millisecond(_) => {
|
||||
let vector = TimestampMillisecondVector::try_from_arrow_array(array).unwrap();
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_timestamp(&vector.get(i)))
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
TimestampType::Microsecond(_) => {
|
||||
let vector = TimestampMicrosecondVector::try_from_arrow_array(array).unwrap();
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_timestamp(&vector.get(i)))
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
TimestampType::Nanosecond(_) => {
|
||||
let vector = TimestampNanosecondVector::try_from_arrow_array(array).unwrap();
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_timestamp(&vector.get(i)))
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
};
|
||||
|
||||
let tzs = {
|
||||
let array = columns[1].to_arrow_array();
|
||||
let vector = StringVector::try_from_arrow_array(&array)
|
||||
.ok()
|
||||
.with_context(|| UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
|
||||
})?;
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_timezone(&vector.get(i).to_string()))
|
||||
.collect::<Vec<_>>()
|
||||
};
|
||||
|
||||
let result = times
|
||||
.iter()
|
||||
.zip(tzs.iter())
|
||||
.map(|(time, tz)| match (time, tz) {
|
||||
(Some(time), _) => Some(time.to_timezone_aware_string(tz.as_ref())),
|
||||
_ => None,
|
||||
})
|
||||
.collect::<Vec<Option<String>>>();
|
||||
Ok(Arc::new(StringVector::from(result)))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use datatypes::scalars::ScalarVector;
|
||||
use datatypes::timestamp::{
|
||||
TimestampMicrosecond, TimestampMillisecond, TimestampNanosecond, TimestampSecond,
|
||||
};
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_timestamp_to_timezone() {
|
||||
let f = ToTimezoneFunction;
|
||||
assert_eq!("to_timezone", f.name());
|
||||
|
||||
let results = vec![
|
||||
Some("1969-12-31 19:00:01"),
|
||||
None,
|
||||
Some("1970-01-01 03:00:01"),
|
||||
None,
|
||||
];
|
||||
let times: Vec<Option<TimestampSecond>> = vec![
|
||||
Some(TimestampSecond::new(1)),
|
||||
None,
|
||||
Some(TimestampSecond::new(1)),
|
||||
None,
|
||||
];
|
||||
let ts_vector: TimestampSecondVector =
|
||||
TimestampSecondVector::from_owned_iterator(times.into_iter());
|
||||
let tzs = vec![Some("America/New_York"), None, Some("Europe/Moscow"), None];
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(ts_vector),
|
||||
Arc::new(StringVector::from(tzs.clone())),
|
||||
];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(4, vector.len());
|
||||
let expect_times: VectorRef = Arc::new(StringVector::from(results));
|
||||
assert_eq!(expect_times, vector);
|
||||
|
||||
let results = vec![
|
||||
Some("1969-12-31 19:00:00.001"),
|
||||
None,
|
||||
Some("1970-01-01 03:00:00.001"),
|
||||
None,
|
||||
];
|
||||
let times: Vec<Option<TimestampMillisecond>> = vec![
|
||||
Some(TimestampMillisecond::new(1)),
|
||||
None,
|
||||
Some(TimestampMillisecond::new(1)),
|
||||
None,
|
||||
];
|
||||
let ts_vector: TimestampMillisecondVector =
|
||||
TimestampMillisecondVector::from_owned_iterator(times.into_iter());
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(ts_vector),
|
||||
Arc::new(StringVector::from(tzs.clone())),
|
||||
];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(4, vector.len());
|
||||
let expect_times: VectorRef = Arc::new(StringVector::from(results));
|
||||
assert_eq!(expect_times, vector);
|
||||
|
||||
let results = vec![
|
||||
Some("1969-12-31 19:00:00.000001"),
|
||||
None,
|
||||
Some("1970-01-01 03:00:00.000001"),
|
||||
None,
|
||||
];
|
||||
let times: Vec<Option<TimestampMicrosecond>> = vec![
|
||||
Some(TimestampMicrosecond::new(1)),
|
||||
None,
|
||||
Some(TimestampMicrosecond::new(1)),
|
||||
None,
|
||||
];
|
||||
let ts_vector: TimestampMicrosecondVector =
|
||||
TimestampMicrosecondVector::from_owned_iterator(times.into_iter());
|
||||
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(ts_vector),
|
||||
Arc::new(StringVector::from(tzs.clone())),
|
||||
];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(4, vector.len());
|
||||
let expect_times: VectorRef = Arc::new(StringVector::from(results));
|
||||
assert_eq!(expect_times, vector);
|
||||
|
||||
let results = vec![
|
||||
Some("1969-12-31 19:00:00.000000001"),
|
||||
None,
|
||||
Some("1970-01-01 03:00:00.000000001"),
|
||||
None,
|
||||
];
|
||||
let times: Vec<Option<TimestampNanosecond>> = vec![
|
||||
Some(TimestampNanosecond::new(1)),
|
||||
None,
|
||||
Some(TimestampNanosecond::new(1)),
|
||||
None,
|
||||
];
|
||||
let ts_vector: TimestampNanosecondVector =
|
||||
TimestampNanosecondVector::from_owned_iterator(times.into_iter());
|
||||
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(ts_vector),
|
||||
Arc::new(StringVector::from(tzs.clone())),
|
||||
];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(4, vector.len());
|
||||
let expect_times: VectorRef = Arc::new(StringVector::from(results));
|
||||
assert_eq!(expect_times, vector);
|
||||
}
|
||||
}
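As a quick illustration of the conversion flow implemented above, here is a hedged sketch that reuses only the two calls visible in the new file (Timezone::from_tz_string and Timestamp::to_timezone_aware_string). The helper name render_in_timezone is illustrative and not part of the changeset.

use common_time::{Timestamp, Timezone};

// Format each non-null timestamp in the requested timezone, mirroring
// ToTimezoneFunction::eval: parse the tz string once, then render row by row.
fn render_in_timezone(times: &[Option<Timestamp>], tz: &str) -> Vec<Option<String>> {
    let tz = Timezone::from_tz_string(tz).ok();
    times
        .iter()
        .map(|t| t.as_ref().map(|t| t.to_timezone_aware_string(tz.as_ref())))
        .collect()
}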
@@ -32,11 +32,19 @@ impl FunctionState {
|
||||
|
||||
use api::v1::meta::ProcedureStatus;
|
||||
use async_trait::async_trait;
|
||||
use common_base::AffectedRows;
|
||||
use common_meta::rpc::procedure::{MigrateRegionRequest, ProcedureStateResponse};
|
||||
use common_query::error::Result;
|
||||
use session::context::QueryContextRef;
|
||||
use store_api::storage::RegionId;
|
||||
use table::requests::{
|
||||
CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest,
|
||||
};
|
||||
|
||||
-    use crate::handlers::ProcedureServiceHandler;
+    use crate::handlers::{ProcedureServiceHandler, TableMutationHandler};
|
||||
struct MockProcedureServiceHandler;
|
||||
struct MockTableMutationHandler;
|
||||
const ROWS: usize = 42;
|
||||
|
||||
#[async_trait]
|
||||
impl ProcedureServiceHandler for MockProcedureServiceHandler {
|
||||
@@ -56,8 +64,59 @@ impl FunctionState {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl TableMutationHandler for MockTableMutationHandler {
|
||||
async fn insert(
|
||||
&self,
|
||||
_request: InsertRequest,
|
||||
_ctx: QueryContextRef,
|
||||
) -> Result<AffectedRows> {
|
||||
Ok(ROWS)
|
||||
}
|
||||
|
||||
async fn delete(
|
||||
&self,
|
||||
_request: DeleteRequest,
|
||||
_ctx: QueryContextRef,
|
||||
) -> Result<AffectedRows> {
|
||||
Ok(ROWS)
|
||||
}
|
||||
|
||||
async fn flush(
|
||||
&self,
|
||||
_request: FlushTableRequest,
|
||||
_ctx: QueryContextRef,
|
||||
) -> Result<AffectedRows> {
|
||||
Ok(ROWS)
|
||||
}
|
||||
|
||||
async fn compact(
|
||||
&self,
|
||||
_request: CompactTableRequest,
|
||||
_ctx: QueryContextRef,
|
||||
) -> Result<AffectedRows> {
|
||||
Ok(ROWS)
|
||||
}
|
||||
|
||||
async fn flush_region(
|
||||
&self,
|
||||
_region_id: RegionId,
|
||||
_ctx: QueryContextRef,
|
||||
) -> Result<AffectedRows> {
|
||||
Ok(ROWS)
|
||||
}
|
||||
|
||||
async fn compact_region(
|
||||
&self,
|
||||
_region_id: RegionId,
|
||||
_ctx: QueryContextRef,
|
||||
) -> Result<AffectedRows> {
|
||||
Ok(ROWS)
|
||||
}
|
||||
}
|
||||
|
||||
         Self {
-            table_mutation_handler: None,
+            table_mutation_handler: Some(Arc::new(MockTableMutationHandler)),
|
||||
procedure_service_handler: Some(Arc::new(MockProcedureServiceHandler)),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,9 +13,9 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::meta::ProcedureStatus;
|
||||
use common_macro::admin_fn;
|
||||
use common_meta::rpc::procedure::ProcedureStateResponse;
|
||||
use common_query::error::Error::ThreadJoin;
|
||||
use common_query::error::{
|
||||
@@ -25,24 +25,14 @@ use common_query::error::{
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use common_telemetry::error;
|
||||
use datatypes::prelude::*;
|
||||
use datatypes::vectors::{ConstantVector, Helper, StringVector, VectorRef};
|
||||
use datatypes::vectors::VectorRef;
|
||||
use serde::Serialize;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, Location, OptionExt};
|
||||
|
||||
use crate::ensure_greptime;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
|
||||
const NAME: &str = "procedure_state";
|
||||
|
||||
/// A function to query procedure state by its id.
|
||||
/// Such as `procedure_state(pid)`.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct ProcedureStateFunction;
|
||||
|
||||
impl fmt::Display for ProcedureStateFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "PROCEDURE_STATE")
|
||||
}
|
||||
}
|
||||
use crate::handlers::ProcedureServiceHandlerRef;
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct ProcedureStateJson {
|
||||
@@ -51,105 +41,58 @@ struct ProcedureStateJson {
|
||||
error: Option<String>,
|
||||
}
|
||||
|
||||
impl Function for ProcedureStateFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::string_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::uniform(
|
||||
1,
|
||||
vec![ConcreteDataType::string_datatype()],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
crate::ensure_greptime!(func_ctx);
|
||||
|
||||
ensure!(
|
||||
columns.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect 1, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let pids = columns[0].clone();
|
||||
let expect_len = pids.len();
|
||||
let is_const = pids.is_const();
|
||||
|
||||
match pids.data_type() {
|
||||
ConcreteDataType::String(_) => {
|
||||
// TODO(dennis): datafusion UDF doesn't support async function currently
|
||||
std::thread::spawn(move || {
|
||||
let pids: &StringVector = if is_const {
|
||||
let pids: &ConstantVector = unsafe { Helper::static_cast(&pids) };
|
||||
unsafe { Helper::static_cast(pids.inner()) }
|
||||
} else {
|
||||
unsafe { Helper::static_cast(&pids) }
|
||||
};
|
||||
|
||||
let procedure_service_handler = func_ctx
|
||||
.state
|
||||
.procedure_service_handler
|
||||
.as_ref()
|
||||
.context(MissingProcedureServiceHandlerSnafu)?;
|
||||
|
||||
let states = pids
|
||||
.iter_data()
|
||||
.map(|pid| {
|
||||
if let Some(pid) = pid {
|
||||
let ProcedureStateResponse { status, error, .. } =
|
||||
common_runtime::block_on_read(async move {
|
||||
procedure_service_handler.query_procedure_state(pid).await
|
||||
})?;
|
||||
|
||||
let status = ProcedureStatus::try_from(status)
|
||||
.map(|v| v.as_str_name())
|
||||
.unwrap_or("Unknown");
|
||||
|
||||
let state = ProcedureStateJson {
|
||||
status: status.to_string(),
|
||||
error: if error.is_empty() { None } else { Some(error) },
|
||||
};
|
||||
|
||||
Ok(Some(serde_json::to_string(&state).unwrap_or_default()))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
})
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
|
||||
let results: VectorRef = Arc::new(StringVector::from(states));
|
||||
|
||||
if is_const {
|
||||
Ok(Arc::new(ConstantVector::new(results, expect_len)) as _)
|
||||
} else {
|
||||
Ok(results)
|
||||
}
|
||||
})
|
||||
.join()
|
||||
.map_err(|e| {
|
||||
error!(e; "Join thread error");
|
||||
ThreadJoin {
|
||||
location: Location::default(),
|
||||
}
|
||||
})?
|
||||
}
|
||||
_ => UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail(),
|
||||
/// A function to query procedure state by its id.
|
||||
/// Such as `procedure_state(pid)`.
|
||||
#[admin_fn(
|
||||
name = "ProcedureStateFunction",
|
||||
display_name = "procedure_state",
|
||||
sig_fn = "signature",
|
||||
ret = "string"
|
||||
)]
|
||||
pub(crate) async fn procedure_state(
|
||||
procedure_service_handler: &ProcedureServiceHandlerRef,
|
||||
_ctx: &QueryContextRef,
|
||||
params: &[ValueRef<'_>],
|
||||
) -> Result<Value> {
|
||||
ensure!(
|
||||
params.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect 1, have: {}",
|
||||
params.len()
|
||||
),
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
let ValueRef::String(pid) = params[0] else {
|
||||
return UnsupportedInputDataTypeSnafu {
|
||||
function: "procedure_state",
|
||||
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
|
||||
let ProcedureStateResponse { status, error, .. } =
|
||||
procedure_service_handler.query_procedure_state(pid).await?;
|
||||
let status = ProcedureStatus::try_from(status)
|
||||
.map(|v| v.as_str_name())
|
||||
.unwrap_or("Unknown");
|
||||
|
||||
let state = ProcedureStateJson {
|
||||
status: status.to_string(),
|
||||
error: if error.is_empty() { None } else { Some(error) },
|
||||
};
|
||||
let json = serde_json::to_string(&state).unwrap_or_default();
|
||||
|
||||
Ok(Value::from(json))
|
||||
}
|
||||
|
||||
fn signature() -> Signature {
|
||||
Signature::uniform(
|
||||
1,
|
||||
vec![ConcreteDataType::string_datatype()],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -12,10 +12,14 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod flush_compact_region;
|
||||
mod flush_compact_table;
|
||||
mod migrate_region;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use flush_compact_region::{CompactRegionFunction, FlushRegionFunction};
|
||||
use flush_compact_table::{CompactTableFunction, FlushTableFunction};
|
||||
use migrate_region::MigrateRegionFunction;
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
@@ -27,5 +31,9 @@ impl TableFunction {
|
||||
/// Register all table functions to [`FunctionRegistry`].
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(MigrateRegionFunction));
|
||||
registry.register(Arc::new(FlushRegionFunction));
|
||||
registry.register(Arc::new(CompactRegionFunction));
|
||||
registry.register(Arc::new(FlushTableFunction));
|
||||
registry.register(Arc::new(CompactTableFunction));
|
||||
}
|
||||
}
|
||||
|
||||
src/common/function/src/table/flush_compact_region.rs (new file, 148 lines)
@@ -0,0 +1,148 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use common_macro::admin_fn;
|
||||
use common_query::error::Error::ThreadJoin;
|
||||
use common_query::error::{
|
||||
InvalidFuncArgsSnafu, MissingTableMutationHandlerSnafu, Result, UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use common_telemetry::error;
|
||||
use datatypes::prelude::*;
|
||||
use datatypes::vectors::VectorRef;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, Location, OptionExt};
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
use crate::ensure_greptime;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::handlers::TableMutationHandlerRef;
|
||||
use crate::helper::cast_u64;
|
||||
|
||||
macro_rules! define_region_function {
|
||||
($name: expr, $display_name_str: expr, $display_name: ident) => {
|
||||
/// A function to $display_name
|
||||
#[admin_fn(name = $name, display_name = $display_name_str, sig_fn = "signature", ret = "uint64")]
|
||||
pub(crate) async fn $display_name(
|
||||
table_mutation_handler: &TableMutationHandlerRef,
|
||||
query_ctx: &QueryContextRef,
|
||||
params: &[ValueRef<'_>],
|
||||
) -> Result<Value> {
|
||||
ensure!(
|
||||
params.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect 1, have: {}",
|
||||
params.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let Some(region_id) = cast_u64(¶ms[0])? else {
|
||||
return UnsupportedInputDataTypeSnafu {
|
||||
function: $display_name_str,
|
||||
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
|
||||
let affected_rows = table_mutation_handler
|
||||
.$display_name(RegionId::from_u64(region_id), query_ctx.clone())
|
||||
.await?;
|
||||
|
||||
Ok(Value::from(affected_rows as u64))
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
define_region_function!("FlushRegionFunction", "flush_region", flush_region);
|
||||
|
||||
define_region_function!("CompactRegionFunction", "compact_region", compact_region);
|
||||
|
||||
fn signature() -> Signature {
|
||||
Signature::uniform(1, ConcreteDataType::numerics(), Volatility::Immutable)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::prelude::TypeSignature;
|
||||
use datatypes::vectors::UInt64Vector;
|
||||
|
||||
use super::*;
|
||||
|
||||
macro_rules! define_region_function_test {
|
||||
($name: ident, $func: ident) => {
|
||||
paste::paste! {
|
||||
#[test]
|
||||
fn [<test_ $name _misc>]() {
|
||||
let f = $func;
|
||||
assert_eq!(stringify!($name), f.name());
|
||||
assert_eq!(
|
||||
ConcreteDataType::uint64_datatype(),
|
||||
f.return_type(&[]).unwrap()
|
||||
);
|
||||
assert!(matches!(f.signature(),
|
||||
Signature {
|
||||
type_signature: TypeSignature::Uniform(1, valid_types),
|
||||
volatility: Volatility::Immutable
|
||||
} if valid_types == ConcreteDataType::numerics()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn [<test_ $name _missing_table_mutation>]() {
|
||||
let f = $func;
|
||||
|
||||
let args = vec![99];
|
||||
|
||||
let args = args
|
||||
.into_iter()
|
||||
.map(|arg| Arc::new(UInt64Vector::from_slice([arg])) as _)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let result = f.eval(FunctionContext::default(), &args).unwrap_err();
|
||||
assert_eq!(
|
||||
"Missing TableMutationHandler, not expected",
|
||||
result.to_string()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn [<test_ $name>]() {
|
||||
let f = $func;
|
||||
|
||||
|
||||
let args = vec![99];
|
||||
|
||||
let args = args
|
||||
.into_iter()
|
||||
.map(|arg| Arc::new(UInt64Vector::from_slice([arg])) as _)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let result = f.eval(FunctionContext::mock(), &args).unwrap();
|
||||
|
||||
let expect: VectorRef = Arc::new(UInt64Vector::from_slice([42]));
|
||||
assert_eq!(expect, result);
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
define_region_function_test!(flush_region, FlushRegionFunction);
|
||||
|
||||
define_region_function_test!(compact_region, CompactRegionFunction);
|
||||
}
|
||||
src/common/function/src/table/flush_compact_table.rs (new file, 178 lines)
@@ -0,0 +1,178 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_macro::admin_fn;
|
||||
use common_query::error::Error::ThreadJoin;
|
||||
use common_query::error::{
|
||||
InvalidFuncArgsSnafu, MissingTableMutationHandlerSnafu, Result, TableMutationSnafu,
|
||||
UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use common_telemetry::error;
|
||||
use datatypes::prelude::*;
|
||||
use datatypes::vectors::VectorRef;
|
||||
use session::context::QueryContextRef;
|
||||
use session::table_name::table_name_to_full_name;
|
||||
use snafu::{ensure, Location, OptionExt, ResultExt};
|
||||
use table::requests::{CompactTableRequest, FlushTableRequest};
|
||||
|
||||
use crate::ensure_greptime;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::handlers::TableMutationHandlerRef;
|
||||
|
||||
macro_rules! define_table_function {
|
||||
($name: expr, $display_name_str: expr, $display_name: ident, $func: ident, $request: ident) => {
|
||||
/// A function to $func table, such as `$display_name(table_name)`.
|
||||
#[admin_fn(name = $name, display_name = $display_name_str, sig_fn = "signature", ret = "uint64")]
|
||||
pub(crate) async fn $display_name(
|
||||
table_mutation_handler: &TableMutationHandlerRef,
|
||||
query_ctx: &QueryContextRef,
|
||||
params: &[ValueRef<'_>],
|
||||
) -> Result<Value> {
|
||||
ensure!(
|
||||
params.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect 1, have: {}",
|
||||
params.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let ValueRef::String(table_name) = params[0] else {
|
||||
return UnsupportedInputDataTypeSnafu {
|
||||
function: $display_name_str,
|
||||
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_name_to_full_name(table_name, &query_ctx)
|
||||
.map_err(BoxedError::new)
|
||||
.context(TableMutationSnafu)?;
|
||||
|
||||
let affected_rows = table_mutation_handler
|
||||
.$func(
|
||||
$request {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
},
|
||||
query_ctx.clone(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(Value::from(affected_rows as u64))
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
define_table_function!(
|
||||
"FlushTableFunction",
|
||||
"flush_table",
|
||||
flush_table,
|
||||
flush,
|
||||
FlushTableRequest
|
||||
);
|
||||
|
||||
define_table_function!(
|
||||
"CompactTableFunction",
|
||||
"compact_table",
|
||||
compact_table,
|
||||
compact,
|
||||
CompactTableRequest
|
||||
);
|
||||
|
||||
fn signature() -> Signature {
|
||||
Signature::uniform(
|
||||
1,
|
||||
vec![ConcreteDataType::string_datatype()],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::prelude::TypeSignature;
|
||||
use datatypes::vectors::{StringVector, UInt64Vector};
|
||||
|
||||
use super::*;
|
||||
|
||||
macro_rules! define_table_function_test {
|
||||
($name: ident, $func: ident) => {
|
||||
paste::paste!{
|
||||
#[test]
|
||||
fn [<test_ $name _misc>]() {
|
||||
let f = $func;
|
||||
assert_eq!(stringify!($name), f.name());
|
||||
assert_eq!(
|
||||
ConcreteDataType::uint64_datatype(),
|
||||
f.return_type(&[]).unwrap()
|
||||
);
|
||||
assert!(matches!(f.signature(),
|
||||
Signature {
|
||||
type_signature: TypeSignature::Uniform(1, valid_types),
|
||||
volatility: Volatility::Immutable
|
||||
} if valid_types == vec![ConcreteDataType::string_datatype()]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn [<test_ $name _missing_table_mutation>]() {
|
||||
let f = $func;
|
||||
|
||||
let args = vec!["test"];
|
||||
|
||||
let args = args
|
||||
.into_iter()
|
||||
.map(|arg| Arc::new(StringVector::from(vec![arg])) as _)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let result = f.eval(FunctionContext::default(), &args).unwrap_err();
|
||||
assert_eq!(
|
||||
"Missing TableMutationHandler, not expected",
|
||||
result.to_string()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn [<test_ $name>]() {
|
||||
let f = $func;
|
||||
|
||||
|
||||
let args = vec!["test"];
|
||||
|
||||
let args = args
|
||||
.into_iter()
|
||||
.map(|arg| Arc::new(StringVector::from(vec![arg])) as _)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let result = f.eval(FunctionContext::mock(), &args).unwrap();
|
||||
|
||||
let expect: VectorRef = Arc::new(UInt64Vector::from_slice([42]));
|
||||
assert_eq!(expect, result);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
define_table_function_test!(flush_table, FlushTableFunction);
|
||||
|
||||
define_table_function_test!(compact_table, CompactTableFunction);
|
||||
}
|
||||
@@ -15,19 +15,25 @@
|
||||
use std::fmt::{self};
|
||||
use std::time::Duration;
|
||||
|
||||
use common_macro::admin_fn;
|
||||
use common_meta::rpc::procedure::MigrateRegionRequest;
|
||||
use common_query::error::Error::ThreadJoin;
|
||||
use common_query::error::{
|
||||
InvalidFuncArgsSnafu, InvalidInputTypeSnafu, MissingProcedureServiceHandlerSnafu, Result,
|
||||
};
|
||||
use common_query::error::{InvalidFuncArgsSnafu, MissingProcedureServiceHandlerSnafu, Result};
|
||||
use common_query::prelude::{Signature, TypeSignature, Volatility};
|
||||
use common_telemetry::logging::error;
|
||||
use datatypes::prelude::{ConcreteDataType, MutableVector, ScalarVectorBuilder};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{StringVectorBuilder, VectorRef};
|
||||
use snafu::{Location, OptionExt, ResultExt};
|
||||
use datatypes::data_type::DataType;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::value::{Value, ValueRef};
|
||||
use datatypes::vectors::VectorRef;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{Location, OptionExt};
|
||||
|
||||
use crate::ensure_greptime;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::handlers::ProcedureServiceHandlerRef;
|
||||
use crate::helper::cast_u64;
|
||||
|
||||
const DEFAULT_REPLAY_TIMEOUT_SECS: u64 = 10;
|
||||
|
||||
/// A function to migrate a region from source peer to target peer.
|
||||
/// Returns the submitted procedure id if success. Only available in cluster mode.
|
||||
@@ -39,137 +45,82 @@ use crate::function::{Function, FunctionContext};
|
||||
/// - `region_id`: the region id
|
||||
/// - `from_peer`: the source peer id
|
||||
/// - `to_peer`: the target peer id
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct MigrateRegionFunction;
|
||||
#[admin_fn(
|
||||
name = "MigrateRegionFunction",
|
||||
display_name = "migrate_region",
|
||||
sig_fn = "signature",
|
||||
ret = "string"
|
||||
)]
|
||||
pub(crate) async fn migrate_region(
|
||||
procedure_service_handler: &ProcedureServiceHandlerRef,
|
||||
_ctx: &QueryContextRef,
|
||||
params: &[ValueRef<'_>],
|
||||
) -> Result<Value> {
|
||||
let (region_id, from_peer, to_peer, replay_timeout) = match params.len() {
|
||||
3 => {
|
||||
let region_id = cast_u64(¶ms[0])?;
|
||||
let from_peer = cast_u64(¶ms[1])?;
|
||||
let to_peer = cast_u64(¶ms[2])?;
|
||||
|
||||
const NAME: &str = "migrate_region";
|
||||
const DEFAULT_REPLAY_TIMEOUT_SECS: u64 = 10;
|
||||
(
|
||||
region_id,
|
||||
from_peer,
|
||||
to_peer,
|
||||
Some(DEFAULT_REPLAY_TIMEOUT_SECS),
|
||||
)
|
||||
}
|
||||
|
||||
fn cast_u64_vector(vector: &VectorRef) -> Result<VectorRef> {
|
||||
vector
|
||||
.cast(&ConcreteDataType::uint64_datatype())
|
||||
.context(InvalidInputTypeSnafu {
|
||||
err_msg: format!(
|
||||
"Failed to cast input into uint64, actual type: {:#?}",
|
||||
vector.data_type(),
|
||||
),
|
||||
})
|
||||
}
|
||||
4 => {
|
||||
let region_id = cast_u64(¶ms[0])?;
|
||||
let from_peer = cast_u64(¶ms[1])?;
|
||||
let to_peer = cast_u64(¶ms[2])?;
|
||||
let replay_timeout = cast_u64(¶ms[3])?;
|
||||
|
||||
impl Function for MigrateRegionFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
(region_id, from_peer, to_peer, replay_timeout)
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::string_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::one_of(
|
||||
vec![
|
||||
// migrate_region(region_id, from_peer, to_peer)
|
||||
TypeSignature::Uniform(3, ConcreteDataType::numerics()),
|
||||
// migrate_region(region_id, from_peer, to_peer, timeout(secs))
|
||||
TypeSignature::Uniform(4, ConcreteDataType::numerics()),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
crate::ensure_greptime!(func_ctx);
|
||||
|
||||
let (region_ids, from_peers, to_peers, replay_timeouts) = match columns.len() {
|
||||
3 => {
|
||||
let region_ids = cast_u64_vector(&columns[0])?;
|
||||
let from_peers = cast_u64_vector(&columns[1])?;
|
||||
let to_peers = cast_u64_vector(&columns[2])?;
|
||||
|
||||
(region_ids, from_peers, to_peers, None)
|
||||
size => {
|
||||
return InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly 3 or 4, have: {}",
|
||||
size
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
};
|
||||
|
||||
4 => {
|
||||
let region_ids = cast_u64_vector(&columns[0])?;
|
||||
let from_peers = cast_u64_vector(&columns[1])?;
|
||||
let to_peers = cast_u64_vector(&columns[2])?;
|
||||
let replay_timeouts = cast_u64_vector(&columns[3])?;
|
||||
match (region_id, from_peer, to_peer, replay_timeout) {
|
||||
(Some(region_id), Some(from_peer), Some(to_peer), Some(replay_timeout)) => {
|
||||
let pid = procedure_service_handler
|
||||
.migrate_region(MigrateRegionRequest {
|
||||
region_id,
|
||||
from_peer,
|
||||
to_peer,
|
||||
replay_timeout: Duration::from_secs(replay_timeout),
|
||||
})
|
||||
.await?;
|
||||
|
||||
(region_ids, from_peers, to_peers, Some(replay_timeouts))
|
||||
match pid {
|
||||
Some(pid) => Ok(Value::from(pid)),
|
||||
None => Ok(Value::Null),
|
||||
}
|
||||
}
|
||||
|
||||
size => {
|
||||
return InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly 3 or 4, have: {}",
|
||||
size
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
};
|
||||
|
||||
// TODO(dennis): datafusion UDF doesn't support async function currently
|
||||
std::thread::spawn(move || {
|
||||
let len = region_ids.len();
|
||||
let mut results = StringVectorBuilder::with_capacity(len);
|
||||
let procedure_service_handler = func_ctx
|
||||
.state
|
||||
.procedure_service_handler
|
||||
.as_ref()
|
||||
.context(MissingProcedureServiceHandlerSnafu)?;
|
||||
|
||||
for index in 0..len {
|
||||
let region_id = region_ids.get(index);
|
||||
let from_peer = from_peers.get(index);
|
||||
let to_peer = to_peers.get(index);
|
||||
let replay_timeout = match &replay_timeouts {
|
||||
Some(replay_timeouts) => replay_timeouts.get(index),
|
||||
None => Value::UInt64(DEFAULT_REPLAY_TIMEOUT_SECS),
|
||||
};
|
||||
|
||||
match (region_id, from_peer, to_peer, replay_timeout) {
|
||||
(
|
||||
Value::UInt64(region_id),
|
||||
Value::UInt64(from_peer),
|
||||
Value::UInt64(to_peer),
|
||||
Value::UInt64(replay_timeout),
|
||||
) => {
|
||||
let pid = common_runtime::block_on_read(async move {
|
||||
procedure_service_handler
|
||||
.migrate_region(MigrateRegionRequest {
|
||||
region_id,
|
||||
from_peer,
|
||||
to_peer,
|
||||
replay_timeout: Duration::from_secs(replay_timeout),
|
||||
})
|
||||
.await
|
||||
})?;
|
||||
|
||||
results.push(pid.as_deref())
|
||||
}
|
||||
_ => {
|
||||
results.push(None);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(results.to_vector())
|
||||
})
|
||||
.join()
|
||||
.map_err(|e| {
|
||||
error!(e; "Join thread error");
|
||||
ThreadJoin {
|
||||
location: Location::default(),
|
||||
}
|
||||
})?
|
||||
_ => Ok(Value::Null),
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for MigrateRegionFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "MIGRATE_REGION")
|
||||
}
|
||||
fn signature() -> Signature {
|
||||
Signature::one_of(
|
||||
vec![
|
||||
// migrate_region(region_id, from_peer, to_peer)
|
||||
TypeSignature::Uniform(3, ConcreteDataType::numerics()),
|
||||
// migrate_region(region_id, from_peer, to_peer, timeout(secs))
|
||||
TypeSignature::Uniform(4, ConcreteDataType::numerics()),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -50,8 +50,12 @@ pub struct FlightEncoder {

 impl Default for FlightEncoder {
     fn default() -> Self {
+        let write_options = writer::IpcWriteOptions::default()
+            .try_with_compression(Some(arrow::ipc::CompressionType::LZ4_FRAME))
+            .unwrap();
+
         Self {
-            write_options: writer::IpcWriteOptions::default(),
+            write_options,
             data_gen: writer::IpcDataGenerator::default(),
             dictionary_tracker: writer::DictionaryTracker::new(false),
         }
src/common/macro/src/admin_fn.rs (new file, 236 lines)
@@ -0,0 +1,236 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use proc_macro::TokenStream;
|
||||
use quote::quote;
|
||||
use syn::spanned::Spanned;
|
||||
use syn::{
|
||||
parse_macro_input, Attribute, AttributeArgs, Ident, ItemFn, Signature, Type, TypePath,
|
||||
TypeReference, Visibility,
|
||||
};
|
||||
|
||||
use crate::utils::{extract_arg_map, extract_input_types, get_ident};
|
||||
|
||||
/// Internal util macro to early return on error.
|
||||
macro_rules! ok {
|
||||
($item:expr) => {
|
||||
match $item {
|
||||
Ok(item) => item,
|
||||
Err(e) => return e.into_compile_error().into(),
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Internal util macro to create an error.
|
||||
macro_rules! error {
|
||||
($span:expr, $msg: expr) => {
|
||||
Err(syn::Error::new($span, $msg))
|
||||
};
|
||||
}
|
||||
|
||||
pub(crate) fn process_admin_fn(args: TokenStream, input: TokenStream) -> TokenStream {
|
||||
let mut result = TokenStream::new();
|
||||
|
||||
// extract arg map
|
||||
let arg_pairs = parse_macro_input!(args as AttributeArgs);
|
||||
let arg_span = arg_pairs[0].span();
|
||||
let arg_map = ok!(extract_arg_map(arg_pairs));
|
||||
|
||||
// decompose the fn block
|
||||
let compute_fn = parse_macro_input!(input as ItemFn);
|
||||
let ItemFn {
|
||||
attrs,
|
||||
vis,
|
||||
sig,
|
||||
block,
|
||||
} = compute_fn;
|
||||
|
||||
// extract fn arg list
|
||||
let Signature {
|
||||
inputs,
|
||||
ident: fn_name,
|
||||
..
|
||||
} = &sig;
|
||||
|
||||
let arg_types = ok!(extract_input_types(inputs));
|
||||
if arg_types.len() < 2 {
|
||||
ok!(error!(
|
||||
sig.span(),
|
||||
"Expect at least two argument for admin fn: (handler, query_ctx)"
|
||||
));
|
||||
}
|
||||
let handler_type = ok!(extract_handler_type(&arg_types));
|
||||
|
||||
// build the struct and its impl block
|
||||
// only do this when `display_name` is specified
|
||||
if let Ok(display_name) = get_ident(&arg_map, "display_name", arg_span) {
|
||||
let struct_code = build_struct(
|
||||
attrs,
|
||||
vis,
|
||||
fn_name,
|
||||
ok!(get_ident(&arg_map, "name", arg_span)),
|
||||
ok!(get_ident(&arg_map, "sig_fn", arg_span)),
|
||||
ok!(get_ident(&arg_map, "ret", arg_span)),
|
||||
handler_type,
|
||||
display_name,
|
||||
);
|
||||
result.extend(struct_code);
|
||||
}
|
||||
|
||||
// preserve this fn
|
||||
let input_fn_code: TokenStream = quote! {
|
||||
#sig { #block }
|
||||
}
|
||||
.into();
|
||||
|
||||
result.extend(input_fn_code);
|
||||
result
|
||||
}
|
||||
|
||||
/// Retrieve the handler type, `ProcedureServiceHandlerRef` or `TableMutationHandlerRef`.
|
||||
fn extract_handler_type(arg_types: &[Type]) -> Result<&Ident, syn::Error> {
|
||||
match &arg_types[0] {
|
||||
Type::Reference(TypeReference { elem, .. }) => match &**elem {
|
||||
Type::Path(TypePath { path, .. }) => Ok(&path
|
||||
.segments
|
||||
.first()
|
||||
.expect("Expected a reference of handler")
|
||||
.ident),
|
||||
other => {
|
||||
error!(other.span(), "Expected a reference of handler")
|
||||
}
|
||||
},
|
||||
other => {
|
||||
error!(other.span(), "Expected a reference of handler")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Build the function struct
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn build_struct(
|
||||
attrs: Vec<Attribute>,
|
||||
vis: Visibility,
|
||||
fn_name: &Ident,
|
||||
name: Ident,
|
||||
sig_fn: Ident,
|
||||
ret: Ident,
|
||||
handler_type: &Ident,
|
||||
display_name_ident: Ident,
|
||||
) -> TokenStream {
|
||||
let display_name = display_name_ident.to_string();
|
||||
let ret = Ident::new(&format!("{ret}_datatype"), ret.span());
|
||||
let uppcase_display_name = display_name.to_uppercase();
|
||||
// Get the handler name in function state by the argument ident
|
||||
let (handler, snafu_type) = match handler_type.to_string().as_str() {
|
||||
"ProcedureServiceHandlerRef" => (
|
||||
Ident::new("procedure_service_handler", handler_type.span()),
|
||||
Ident::new("MissingProcedureServiceHandlerSnafu", handler_type.span()),
|
||||
),
|
||||
|
||||
"TableMutationHandlerRef" => (
|
||||
Ident::new("table_mutation_handler", handler_type.span()),
|
||||
Ident::new("MissingTableMutationHandlerSnafu", handler_type.span()),
|
||||
),
|
||||
handler => ok!(error!(
|
||||
handler_type.span(),
|
||||
format!("Unknown handler type: {handler}")
|
||||
)),
|
||||
};
|
||||
|
||||
quote! {
|
||||
#(#attrs)*
|
||||
#[derive(Debug)]
|
||||
#vis struct #name;
|
||||
|
||||
impl fmt::Display for #name {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, #uppcase_display_name)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl Function for #name {
|
||||
fn name(&self) -> &'static str {
|
||||
#display_name
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::#ret())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
#sig_fn()
|
||||
}
|
||||
|
||||
fn eval(&self, func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
// Ensure under the `greptime` catalog for security
|
||||
ensure_greptime!(func_ctx);
|
||||
|
||||
let columns_num = columns.len();
|
||||
let rows_num = if columns.is_empty() {
|
||||
1
|
||||
} else {
|
||||
columns[0].len()
|
||||
};
|
||||
let columns = Vec::from(columns);
|
||||
|
||||
// TODO(dennis): DataFusion doesn't support async UDF currently
|
||||
std::thread::spawn(move || {
|
||||
let query_ctx = &func_ctx.query_ctx;
|
||||
let handler = func_ctx
|
||||
.state
|
||||
.#handler
|
||||
.as_ref()
|
||||
.context(#snafu_type)?;
|
||||
|
||||
let mut builder = ConcreteDataType::#ret()
|
||||
.create_mutable_vector(rows_num);
|
||||
|
||||
if columns_num == 0 {
|
||||
let result = common_runtime::block_on_read(async move {
|
||||
#fn_name(handler, query_ctx, &[]).await
|
||||
})?;
|
||||
|
||||
builder.push_value_ref(result.as_value_ref());
|
||||
} else {
|
||||
for i in 0..rows_num {
|
||||
let args: Vec<_> = columns.iter()
|
||||
.map(|vector| vector.get_ref(i))
|
||||
.collect();
|
||||
|
||||
let result = common_runtime::block_on_read(async move {
|
||||
#fn_name(handler, query_ctx, &args).await
|
||||
})?;
|
||||
|
||||
builder.push_value_ref(result.as_value_ref());
|
||||
}
|
||||
}
|
||||
|
||||
Ok(builder.to_vector())
|
||||
})
|
||||
.join()
|
||||
.map_err(|e| {
|
||||
error!(e; "Join thread error");
|
||||
ThreadJoin {
|
||||
location: Location::default(),
|
||||
}
|
||||
})?
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
.into()
|
||||
}
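The generated eval above has to call an async body from DataFusion's synchronous UDF interface, and the pattern it uses is spawn-a-thread-and-block. Below is a minimal sketch of that bridge, assuming the futures crate for block_on (GreptimeDB itself uses common_runtime::block_on_read); the helper name is illustrative.

use std::thread;

// Run an async admin-fn body to completion from a synchronous UDF call site.
// A dedicated worker thread keeps the blocking off the caller's async runtime.
fn call_async_from_sync_udf<T, F>(fut: F) -> T
where
    T: Send + 'static,
    F: std::future::Future<Output = T> + Send + 'static,
{
    thread::spawn(move || futures::executor::block_on(fut))
        .join()
        .expect("admin fn worker thread panicked")
}

fn main() {
    let answer = call_async_from_sync_udf(async { 21 * 2 });
    assert_eq!(answer, 42);
}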
@@ -12,17 +12,20 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+mod admin_fn;
 mod aggr_func;
 mod print_caller;
 mod range_fn;
 mod stack_trace_debug;

+mod utils;

 use aggr_func::{impl_aggr_func_type_store, impl_as_aggr_func_creator};
 use print_caller::process_print_caller;
 use proc_macro::TokenStream;
 use range_fn::process_range_fn;
 use syn::{parse_macro_input, DeriveInput};

+use crate::admin_fn::process_admin_fn;

 /// Make struct implemented trait [AggrFuncTypeStore], which is necessary when writing UDAF.
 /// This derive macro is expect to be used along with attribute macro [macro@as_aggr_func_creator].
 #[proc_macro_derive(AggrFuncTypeStore)]
@@ -68,6 +71,25 @@ pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
     process_range_fn(args, input)
 }

+/// Attribute macro to convert a normal function to SQL administration function. The annotated function
+/// should accept:
+/// - `&ProcedureServiceHandlerRef` or `&TableMutationHandlerRef` as the first argument,
+/// - `&QueryContextRef` as the second argument, and
+/// - `&[ValueRef<'_>]` as the third argument which is SQL function input values in each row.
+/// Return type must be `common_query::error::Result<Value>`.
+///
+/// # Example see `common/function/src/system/procedure_state.rs`.
+///
+/// # Arguments
+/// - `name`: The name of the generated `Function` implementation.
+/// - `ret`: The return type of the generated SQL function, it will be transformed into `ConcreteDataType::{ret}_datatype()` result.
+/// - `display_name`: The display name of the generated SQL function.
+/// - `sig_fn`: the function to returns `Signature` of generated `Function`.
+#[proc_macro_attribute]
+pub fn admin_fn(args: TokenStream, input: TokenStream) -> TokenStream {
+    process_admin_fn(args, input)
+}

 /// Attribute macro to print the caller to the annotated function.
 /// The caller is printed as its filename and the call site line number.
 ///
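To make the #[admin_fn] contract documented in the hunk above concrete, here is a minimal sketch of an annotated function in the shape used by procedure_state and flush_region in this changeset. The region_count name, its body, and the trimmed import list are illustrative assumptions; the macro-required items (fmt, Function, FunctionContext, snafu helpers, ensure_greptime, etc.) are elided here and can be seen in flush_compact_region.rs above.

use common_macro::admin_fn;
use common_query::error::Result;
use common_query::prelude::{Signature, Volatility};
use datatypes::prelude::ConcreteDataType;
use datatypes::value::{Value, ValueRef};
use session::context::QueryContextRef;

use crate::handlers::ProcedureServiceHandlerRef;

/// A hypothetical `region_count()` admin function that always returns 1.
#[admin_fn(
    name = "RegionCountFunction",
    display_name = "region_count",
    sig_fn = "signature",
    ret = "uint64"
)]
pub(crate) async fn region_count(
    _handler: &ProcedureServiceHandlerRef,
    _ctx: &QueryContextRef,
    _params: &[ValueRef<'_>],
) -> Result<Value> {
    // The macro wraps this async body in a generated `RegionCountFunction`
    // whose synchronous `eval` drives it on a separate thread (see admin_fn.rs).
    Ok(Value::from(1u64))
}

fn signature() -> Signature {
    Signature::uniform(
        1,
        vec![ConcreteDataType::string_datatype()],
        Volatility::Immutable,
    )
}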
@@ -12,20 +12,16 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use proc_macro::TokenStream;
|
||||
use proc_macro2::Span;
|
||||
use quote::quote;
|
||||
use syn::punctuated::Punctuated;
|
||||
use syn::spanned::Spanned;
|
||||
use syn::token::Comma;
|
||||
use syn::{
|
||||
parse_macro_input, Attribute, AttributeArgs, FnArg, Ident, ItemFn, Meta, MetaNameValue,
|
||||
NestedMeta, Signature, Type, TypeReference, Visibility,
|
||||
parse_macro_input, Attribute, AttributeArgs, Ident, ItemFn, Signature, Type, TypeReference,
|
||||
Visibility,
|
||||
};
|
||||
|
||||
/// Internal util macro to early return on error.
|
||||
use crate::utils::{extract_arg_map, extract_input_types, get_ident};
|
||||
|
||||
macro_rules! ok {
|
||||
($item:expr) => {
|
||||
match $item {
|
||||
@@ -89,48 +85,6 @@ pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenSt
|
||||
result
|
||||
}
|
||||
|
||||
/// Extract a String <-> Ident map from the attribute args.
|
||||
fn extract_arg_map(args: Vec<NestedMeta>) -> Result<HashMap<String, Ident>, syn::Error> {
|
||||
args.into_iter()
|
||||
.map(|meta| {
|
||||
if let NestedMeta::Meta(Meta::NameValue(MetaNameValue { path, lit, .. })) = meta {
|
||||
let name = path.get_ident().unwrap().to_string();
|
||||
let ident = match lit {
|
||||
syn::Lit::Str(lit_str) => lit_str.parse::<Ident>(),
|
||||
_ => Err(syn::Error::new(
|
||||
lit.span(),
|
||||
"Unexpected attribute format. Expected `name = \"value\"`",
|
||||
)),
|
||||
}?;
|
||||
Ok((name, ident))
|
||||
} else {
|
||||
Err(syn::Error::new(
|
||||
meta.span(),
|
||||
"Unexpected attribute format. Expected `name = \"value\"`",
|
||||
))
|
||||
}
|
||||
})
|
||||
.collect::<Result<HashMap<String, Ident>, syn::Error>>()
|
||||
}
|
||||
|
||||
/// Helper function to get an Ident from the previous arg map.
|
||||
fn get_ident(map: &HashMap<String, Ident>, key: &str, span: Span) -> Result<Ident, syn::Error> {
|
||||
map.get(key)
|
||||
.cloned()
|
||||
.ok_or_else(|| syn::Error::new(span, format!("Expect attribute {key} but not found")))
|
||||
}
|
||||
|
||||
/// Extract the argument list from the annotated function.
|
||||
fn extract_input_types(inputs: &Punctuated<FnArg, Comma>) -> Result<Vec<Type>, syn::Error> {
|
||||
inputs
|
||||
.iter()
|
||||
.map(|arg| match arg {
|
||||
FnArg::Receiver(receiver) => Err(syn::Error::new(receiver.span(), "expected bool")),
|
||||
FnArg::Typed(pat_type) => Ok(*pat_type.ty.clone()),
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn build_struct(
|
||||
attrs: Vec<Attribute>,
|
||||
vis: Visibility,
|
||||
@@ -214,7 +168,7 @@ fn build_calc_fn(
|
||||
|
||||
#( let #range_array_names = RangeArray::try_new(extract_array(&input[#param_numbers])?.to_data().into())?; )*
|
||||
|
||||
// TODO(ruihang): add ensure!()
|
||||
// TODO(ruihang): add ensure!()
|
||||
|
||||
let mut result_array = Vec::new();
|
||||
for index in 0..#first_range_array_name.len(){
|
||||
|
||||
69
src/common/macro/src/utils.rs
Normal file
@@ -0,0 +1,69 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use proc_macro2::Span;
|
||||
use syn::punctuated::Punctuated;
|
||||
use syn::spanned::Spanned;
|
||||
use syn::token::Comma;
|
||||
use syn::{FnArg, Ident, Meta, MetaNameValue, NestedMeta, Type};
|
||||
|
||||
/// Extract a String <-> Ident map from the attribute args.
|
||||
pub(crate) fn extract_arg_map(args: Vec<NestedMeta>) -> Result<HashMap<String, Ident>, syn::Error> {
|
||||
args.into_iter()
|
||||
.map(|meta| {
|
||||
if let NestedMeta::Meta(Meta::NameValue(MetaNameValue { path, lit, .. })) = meta {
|
||||
let name = path.get_ident().unwrap().to_string();
|
||||
let ident = match lit {
|
||||
syn::Lit::Str(lit_str) => lit_str.parse::<Ident>(),
|
||||
_ => Err(syn::Error::new(
|
||||
lit.span(),
|
||||
"Unexpected attribute format. Expected `name = \"value\"`",
|
||||
)),
|
||||
}?;
|
||||
Ok((name, ident))
|
||||
} else {
|
||||
Err(syn::Error::new(
|
||||
meta.span(),
|
||||
"Unexpected attribute format. Expected `name = \"value\"`",
|
||||
))
|
||||
}
|
||||
})
|
||||
.collect::<Result<HashMap<String, Ident>, syn::Error>>()
|
||||
}
|
||||
|
||||
/// Helper function to get an Ident from the previous arg map.
|
||||
pub(crate) fn get_ident(
|
||||
map: &HashMap<String, Ident>,
|
||||
key: &str,
|
||||
span: Span,
|
||||
) -> Result<Ident, syn::Error> {
|
||||
map.get(key)
|
||||
.cloned()
|
||||
.ok_or_else(|| syn::Error::new(span, format!("Expect attribute {key} but not found")))
|
||||
}
|
||||
|
||||
/// Extract the argument list from the annotated function.
|
||||
pub(crate) fn extract_input_types(
|
||||
inputs: &Punctuated<FnArg, Comma>,
|
||||
) -> Result<Vec<Type>, syn::Error> {
|
||||
inputs
|
||||
.iter()
|
||||
.map(|arg| match arg {
|
||||
FnArg::Receiver(receiver) => Err(syn::Error::new(receiver.span(), "expected bool")),
|
||||
FnArg::Typed(pat_type) => Ok(*pat_type.ty.clone()),
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
@@ -18,6 +18,7 @@ async-trait.workspace = true
|
||||
base64.workspace = true
|
||||
bytes.workspace = true
|
||||
chrono.workspace = true
|
||||
common-base.workspace = true
|
||||
common-catalog.workspace = true
|
||||
common-error.workspace = true
|
||||
common-grpc-expr.workspace = true
|
||||
|
||||
@@ -15,23 +15,25 @@
use std::sync::Arc;

use api::v1::region::{QueryRequest, RegionRequest};
pub use common_base::AffectedRows;
use common_recordbatch::SendableRecordBatchStream;

use crate::error::Result;
use crate::peer::Peer;

pub type AffectedRows = u64;

/// The trait for handling requests to datanode.
#[async_trait::async_trait]
pub trait Datanode: Send + Sync {
    /// Handles DML and DDL requests.
    async fn handle(&self, request: RegionRequest) -> Result<AffectedRows>;

    /// Handles query requests.
    async fn handle_query(&self, request: QueryRequest) -> Result<SendableRecordBatchStream>;
}

pub type DatanodeRef = Arc<dyn Datanode>;
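For context, a minimal hedged sketch of a `Datanode` test double; `NoopDatanode` and its behavior are invented for illustration and are not part of this change:

// A no-op implementation that satisfies the trait above.
struct NoopDatanode;

#[async_trait::async_trait]
impl Datanode for NoopDatanode {
    async fn handle(&self, _request: RegionRequest) -> Result<AffectedRows> {
        // Pretend the DML/DDL request succeeded without touching any region.
        Ok(0)
    }

    async fn handle_query(&self, _request: QueryRequest) -> Result<SendableRecordBatchStream> {
        // A real datanode would stream record batches back to the caller.
        unimplemented!("query handling is out of scope for this sketch")
    }
}

// Usage: let datanode: DatanodeRef = Arc::new(NoopDatanode);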

/// Datanode manager
#[async_trait::async_trait]
pub trait DatanodeManager: Send + Sync {
    /// Retrieves a target `datanode`.

@@ -87,12 +87,24 @@ impl CreateTableProcedure {
|
||||
self.table_info().ident.table_id
|
||||
}
|
||||
|
||||
fn region_wal_options(&self) -> Option<&HashMap<RegionNumber, String>> {
|
||||
self.creator.data.region_wal_options.as_ref()
|
||||
fn region_wal_options(&self) -> Result<&HashMap<RegionNumber, String>> {
|
||||
self.creator
|
||||
.data
|
||||
.region_wal_options
|
||||
.as_ref()
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "region_wal_options is not allocated",
|
||||
})
|
||||
}
|
||||
|
||||
fn table_route(&self) -> Option<&TableRouteValue> {
|
||||
self.creator.data.table_route.as_ref()
|
||||
fn table_route(&self) -> Result<&TableRouteValue> {
|
||||
self.creator
|
||||
.data
|
||||
.table_route
|
||||
.as_ref()
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "table_route is not allocated",
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
@@ -181,7 +193,7 @@ impl CreateTableProcedure {
|
||||
/// - [Code::Unavailable](tonic::status::Code::Unavailable)
|
||||
pub async fn on_datanode_create_regions(&mut self) -> Result<Status> {
|
||||
// Safety: the table route must be allocated.
|
||||
match &self.creator.data.table_route.clone().unwrap() {
|
||||
match self.table_route()?.clone() {
|
||||
TableRouteValue::Physical(x) => {
|
||||
let region_routes = x.region_routes.clone();
|
||||
let request_builder = self.new_region_request_builder(None)?;
|
||||
@@ -194,12 +206,12 @@ impl CreateTableProcedure {
|
||||
.context
|
||||
.table_metadata_manager
|
||||
.table_route_manager()
|
||||
.get(physical_table_id)
|
||||
.try_get_physical_table_route(physical_table_id)
|
||||
.await?
|
||||
.context(TableRouteNotFoundSnafu {
|
||||
table_id: physical_table_id,
|
||||
})?;
|
||||
let region_routes = physical_table_route.region_routes()?;
|
||||
let region_routes = &physical_table_route.region_routes;
|
||||
|
||||
let request_builder = self.new_region_request_builder(Some(physical_table_id))?;
|
||||
|
||||
@@ -214,7 +226,7 @@ impl CreateTableProcedure {
|
||||
request_builder: CreateRequestBuilder,
|
||||
) -> Result<Status> {
|
||||
// Safety: the table_route must be allocated.
|
||||
if self.table_route().unwrap().is_physical() {
|
||||
if self.table_route()?.is_physical() {
|
||||
// Registers opening regions
|
||||
let guards = self
|
||||
.creator
|
||||
@@ -226,7 +238,7 @@ impl CreateTableProcedure {
|
||||
|
||||
let create_table_data = &self.creator.data;
|
||||
// Safety: the region_wal_options must be allocated
|
||||
let region_wal_options = self.region_wal_options().unwrap();
|
||||
let region_wal_options = self.region_wal_options()?;
|
||||
let create_table_expr = &create_table_data.task.create_table;
|
||||
let catalog = &create_table_expr.catalog_name;
|
||||
let schema = &create_table_expr.schema_name;
|
||||
@@ -291,9 +303,9 @@ impl CreateTableProcedure {
|
||||
|
||||
let raw_table_info = self.table_info().clone();
|
||||
// Safety: the region_wal_options must be allocated.
|
||||
let region_wal_options = self.region_wal_options().unwrap().clone();
|
||||
let region_wal_options = self.region_wal_options()?.clone();
|
||||
// Safety: the table_route must be allocated.
|
||||
let table_route = self.table_route().unwrap().clone();
|
||||
let table_route = self.table_route()?.clone();
|
||||
manager
|
||||
.create_table_metadata(raw_table_info, table_route, region_wal_options)
|
||||
.await?;
|
||||
|
||||
@@ -16,7 +16,7 @@ use std::sync::Arc;
|
||||
|
||||
use common_procedure::{watcher, Output, ProcedureId, ProcedureManagerRef, ProcedureWithId};
|
||||
use common_telemetry::tracing_context::{FutureExt, TracingContext};
|
||||
use common_telemetry::{info, tracing};
|
||||
use common_telemetry::{debug, info, tracing};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::storage::TableId;
|
||||
|
||||
@@ -545,7 +545,7 @@ impl ProcedureExecutor for DdlManager {
|
||||
.attach(tracing::info_span!("DdlManager::submit_ddl_task"));
|
||||
async move {
|
||||
let cluster_id = ctx.cluster_id.unwrap_or_default();
|
||||
info!("Submitting Ddl task: {:?}", request.task);
|
||||
debug!("Submitting Ddl task: {:?}", request.task);
|
||||
match request.task {
|
||||
CreateTable(create_table_task) => {
|
||||
handle_create_table_task(self, cluster_id, create_table_task).await
|
||||
|
||||
@@ -67,6 +67,14 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to execute {} txn operations via Etcd", max_operations))]
|
||||
EtcdTxnFailed {
|
||||
max_operations: usize,
|
||||
#[snafu(source)]
|
||||
error: etcd_client::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to get sequence: {}", err_msg))]
|
||||
NextSequence { err_msg: String, location: Location },
|
||||
|
||||
@@ -340,6 +348,9 @@ pub enum Error {
|
||||
error: rskafka::client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to resolve Kafka broker endpoint."))]
|
||||
ResolveKafkaEndpoint { source: common_wal::error::Error },
|
||||
|
||||
#[snafu(display("Failed to build a Kafka controller client"))]
|
||||
BuildKafkaCtrlClient {
|
||||
location: Location,
|
||||
@@ -397,6 +408,7 @@ impl ErrorExt for Error {
|
||||
IllegalServerState { .. }
|
||||
| EtcdTxnOpResponse { .. }
|
||||
| EtcdFailed { .. }
|
||||
| EtcdTxnFailed { .. }
|
||||
| ConnectEtcd { .. } => StatusCode::Internal,
|
||||
|
||||
SerdeJson { .. }
|
||||
@@ -425,6 +437,7 @@ impl ErrorExt for Error {
|
||||
| BuildKafkaClient { .. }
|
||||
| BuildKafkaCtrlClient { .. }
|
||||
| BuildKafkaPartitionClient { .. }
|
||||
| ResolveKafkaEndpoint { .. }
|
||||
| ProduceRecord { .. }
|
||||
| CreateKafkaWalTopic { .. }
|
||||
| EmptyTopicPool { .. }
|
||||
|
||||
@@ -363,8 +363,10 @@ impl TableMetadataManager {
|
||||
Option<DeserializedValueWithBytes<TableInfoValue>>,
|
||||
Option<DeserializedValueWithBytes<TableRouteValue>>,
|
||||
)> {
|
||||
let (get_table_route_txn, table_route_decoder) =
|
||||
self.table_route_manager.build_get_txn(table_id);
|
||||
let (get_table_route_txn, table_route_decoder) = self
|
||||
.table_route_manager
|
||||
.table_route_storage()
|
||||
.build_get_txn(table_id);
|
||||
|
||||
let (get_table_info_txn, table_info_decoder) =
|
||||
self.table_info_manager.build_get_txn(table_id);
|
||||
@@ -414,6 +416,7 @@ impl TableMetadataManager {
|
||||
|
||||
let (create_table_route_txn, on_create_table_route_failure) = self
|
||||
.table_route_manager()
|
||||
.table_route_storage()
|
||||
.build_create_txn(table_id, &table_route_value)?;
|
||||
|
||||
let mut txn = Txn::merge_all(vec![
|
||||
@@ -461,7 +464,7 @@ impl TableMetadataManager {
|
||||
pub fn max_logical_tables_per_batch(&self) -> usize {
    // The batch size is max_txn_ops / 3 because each entry in `tables_data`
    // produces roughly three txn operations.
    self.kv_backend.max_txn_size() / 3
    self.kv_backend.max_txn_ops() / 3
}
|
||||
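For example, with etcd's default `--max-txn-ops` of 128 (see the `EtcdStore` changes further down), this allows batches of up to 42 logical tables per transaction.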
|
||||
/// Creates metadata for multiple logical tables and returns an error if different metadata exists.
|
||||
@@ -506,6 +509,7 @@ impl TableMetadataManager {
|
||||
|
||||
let (create_table_route_txn, on_create_table_route_failure) = self
|
||||
.table_route_manager()
|
||||
.table_route_storage()
|
||||
.build_create_txn(table_id, &table_route_value)?;
|
||||
txns.push(create_table_route_txn);
|
||||
|
||||
@@ -579,6 +583,7 @@ impl TableMetadataManager {
|
||||
// Deletes table route.
|
||||
let delete_table_route_txn = self
|
||||
.table_route_manager()
|
||||
.table_route_storage()
|
||||
.build_delete_txn(table_id, table_route_value)?;
|
||||
|
||||
let txn = Txn::merge_all(vec![
|
||||
@@ -713,6 +718,7 @@ impl TableMetadataManager {
|
||||
|
||||
let (update_table_route_txn, on_update_table_route_failure) = self
|
||||
.table_route_manager()
|
||||
.table_route_storage()
|
||||
.build_update_txn(table_id, current_table_route_value, &new_table_route_value)?;
|
||||
|
||||
let txn = Txn::merge_all(vec![update_datanode_table_txn, update_table_route_txn]);
|
||||
@@ -765,6 +771,7 @@ impl TableMetadataManager {
|
||||
|
||||
let (update_table_route_txn, on_update_table_route_failure) = self
|
||||
.table_route_manager()
|
||||
.table_route_storage()
|
||||
.build_update_txn(table_id, current_table_route_value, &new_table_route_value)?;
|
||||
|
||||
let r = self.kv_backend.txn(update_table_route_txn).await?;
|
||||
@@ -853,6 +860,7 @@ mod tests {
|
||||
use bytes::Bytes;
|
||||
use common_time::util::current_time_millis;
|
||||
use futures::TryStreamExt;
|
||||
use store_api::storage::RegionId;
|
||||
use table::metadata::{RawTableInfo, TableInfo};
|
||||
|
||||
use super::datanode_table::DatanodeTableKey;
|
||||
@@ -1049,6 +1057,36 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_create_many_logical_tables_metadata() {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::default());
|
||||
let table_metadata_manager = TableMetadataManager::new(kv_backend);
|
||||
|
||||
let mut tables_data = vec![];
|
||||
for i in 0..128 {
|
||||
let table_id = i + 1;
|
||||
let region_number = table_id * 3;
let region_id = RegionId::new(table_id, region_number);
|
||||
let region_route = new_region_route(region_id.as_u64(), 2);
|
||||
let region_routes = vec![region_route.clone()];
|
||||
let table_info: RawTableInfo = test_utils::new_test_table_info_with_name(
|
||||
table_id,
|
||||
&format!("my_table_{}", table_id),
|
||||
region_routes.iter().map(|r| r.region.id.region_number()),
|
||||
)
|
||||
.into();
|
||||
let table_route_value = TableRouteValue::physical(region_routes.clone());
|
||||
|
||||
tables_data.push((table_info, table_route_value));
|
||||
}
|
||||
|
||||
// creates metadata.
|
||||
table_metadata_manager
|
||||
.create_logical_tables_metadata(tables_data)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_delete_table_metadata() {
|
||||
let mem_kv = Arc::new(MemoryKvBackend::default());
|
||||
@@ -1096,6 +1134,7 @@ mod tests {
|
||||
|
||||
assert!(table_metadata_manager
|
||||
.table_route_manager()
|
||||
.table_route_storage()
|
||||
.get(table_id)
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -1120,7 +1159,8 @@ mod tests {
|
||||
|
||||
let removed_table_route = table_metadata_manager
|
||||
.table_route_manager()
|
||||
.get_removed(table_id)
|
||||
.table_route_storage()
|
||||
.get_raw_removed(table_id)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
@@ -1316,6 +1356,7 @@ mod tests {
|
||||
|
||||
let updated_route_value = table_metadata_manager
|
||||
.table_route_manager()
|
||||
.table_route_storage()
|
||||
.get(table_id)
|
||||
.await
|
||||
.unwrap()
|
||||
|
||||
@@ -22,7 +22,7 @@ use table::metadata::TableId;
|
||||
|
||||
use super::{txn_helper, DeserializedValueWithBytes, TableMetaValue};
|
||||
use crate::error::{
|
||||
MetadataCorruptionSnafu, Result, SerdeJsonSnafu, TableRouteNotFoundSnafu,
|
||||
self, MetadataCorruptionSnafu, Result, SerdeJsonSnafu, TableRouteNotFoundSnafu,
|
||||
UnexpectedLogicalRouteTableSnafu,
|
||||
};
|
||||
use crate::key::{to_removed_key, RegionDistribution, TableMetaKey, TABLE_ROUTE_PREFIX};
|
||||
@@ -77,7 +77,7 @@ impl TableRouteValue {
|
||||
err_msg: format!("{self:?} is a non-physical TableRouteValue."),
|
||||
}
|
||||
);
|
||||
let version = self.physical_table_route().version;
|
||||
let version = self.as_physical_table_route_ref().version;
|
||||
Ok(Self::Physical(PhysicalTableRouteValue {
|
||||
region_routes,
|
||||
version: version + 1,
|
||||
@@ -95,7 +95,7 @@ impl TableRouteValue {
|
||||
err_msg: format!("{self:?} is a non-physical TableRouteValue."),
|
||||
}
|
||||
);
|
||||
Ok(self.physical_table_route().version)
|
||||
Ok(self.as_physical_table_route_ref().version)
|
||||
}
|
||||
|
||||
/// Returns the corresponding [RegionRoute]; returns `None` if the specific region is not found.
|
||||
@@ -109,7 +109,7 @@ impl TableRouteValue {
|
||||
}
|
||||
);
|
||||
Ok(self
|
||||
.physical_table_route()
|
||||
.as_physical_table_route_ref()
|
||||
.region_routes
|
||||
.iter()
|
||||
.find(|route| route.region.id == region_id)
|
||||
@@ -129,10 +129,25 @@ impl TableRouteValue {
|
||||
err_msg: format!("{self:?} is a non-physical TableRouteValue."),
|
||||
}
|
||||
);
|
||||
Ok(&self.physical_table_route().region_routes)
|
||||
Ok(&self.as_physical_table_route_ref().region_routes)
|
||||
}
|
||||
|
||||
fn physical_table_route(&self) -> &PhysicalTableRouteValue {
|
||||
/// Returns the reference of [`PhysicalTableRouteValue`].
|
||||
///
|
||||
/// # Panic
|
||||
/// If it is not the [`PhysicalTableRouteValue`].
|
||||
fn as_physical_table_route_ref(&self) -> &PhysicalTableRouteValue {
|
||||
match self {
|
||||
TableRouteValue::Physical(x) => x,
|
||||
_ => unreachable!("Mistakenly been treated as a Physical TableRoute: {self:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
/// Converts to [`PhysicalTableRouteValue`].
|
||||
///
|
||||
/// # Panic
|
||||
/// If it is not the [`PhysicalTableRouteValue`].
|
||||
fn into_physical_table_route(self) -> PhysicalTableRouteValue {
|
||||
match self {
|
||||
TableRouteValue::Physical(x) => x,
|
||||
_ => unreachable!("Mistakenly been treated as a Physical TableRoute: {self:?}"),
|
||||
@@ -213,111 +228,53 @@ impl Display for TableRouteKey {
|
||||
}
|
||||
|
||||
pub struct TableRouteManager {
|
||||
kv_backend: KvBackendRef,
|
||||
storage: TableRouteStorage,
|
||||
}
|
||||
|
||||
impl TableRouteManager {
|
||||
pub fn new(kv_backend: KvBackendRef) -> Self {
|
||||
Self { kv_backend }
|
||||
Self {
|
||||
storage: TableRouteStorage::new(kv_backend),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn build_get_txn(
|
||||
/// Returns the [`PhysicalTableRouteValue`] in the first level,
|
||||
/// It won't follow the [`LogicalTableRouteValue`] to find the next level [`PhysicalTableRouteValue`].
|
||||
///
|
||||
/// Returns an error if the first level value is not a [`PhysicalTableRouteValue`].
|
||||
pub async fn try_get_physical_table_route(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
) -> (
|
||||
Txn,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
|
||||
) {
|
||||
let key = TableRouteKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
let txn = Txn::new().and_then(vec![TxnOp::Get(raw_key.clone())]);
|
||||
|
||||
(txn, txn_helper::build_txn_response_decoder_fn(raw_key))
|
||||
}
|
||||
|
||||
/// Builds a create table route transaction. it expected the `__table_route/{table_id}` wasn't occupied.
|
||||
pub fn build_create_txn(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
table_route_value: &TableRouteValue,
|
||||
) -> Result<(
|
||||
Txn,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
|
||||
)> {
|
||||
let key = TableRouteKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
|
||||
let txn = txn_helper::build_put_if_absent_txn(
|
||||
raw_key.clone(),
|
||||
table_route_value.try_as_raw_value()?,
|
||||
);
|
||||
|
||||
Ok((txn, txn_helper::build_txn_response_decoder_fn(raw_key)))
|
||||
}
|
||||
|
||||
/// Builds a update table route transaction, it expected the remote value equals the `current_table_route_value`.
|
||||
/// It retrieves the latest value if the comparing failed.
|
||||
pub(crate) fn build_update_txn(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
current_table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
|
||||
new_table_route_value: &TableRouteValue,
|
||||
) -> Result<(
|
||||
Txn,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
|
||||
)> {
|
||||
let key = TableRouteKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
let raw_value = current_table_route_value.get_raw_bytes();
|
||||
let new_raw_value: Vec<u8> = new_table_route_value.try_as_raw_value()?;
|
||||
|
||||
let txn = txn_helper::build_compare_and_put_txn(raw_key.clone(), raw_value, new_raw_value);
|
||||
|
||||
Ok((txn, txn_helper::build_txn_response_decoder_fn(raw_key)))
|
||||
}
|
||||
|
||||
/// Builds a delete table route transaction, it expected the remote value equals the `table_route_value`.
|
||||
pub(crate) fn build_delete_txn(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
|
||||
) -> Result<Txn> {
|
||||
let key = TableRouteKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
let raw_value = table_route_value.get_raw_bytes();
|
||||
let removed_key = to_removed_key(&String::from_utf8_lossy(&raw_key));
|
||||
|
||||
let txn = Txn::new().and_then(vec![
|
||||
TxnOp::Delete(raw_key),
|
||||
TxnOp::Put(removed_key.into_bytes(), raw_value),
|
||||
]);
|
||||
|
||||
Ok(txn)
|
||||
}
|
||||
|
||||
pub async fn get(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>> {
|
||||
let key = TableRouteKey::new(table_id);
|
||||
self.kv_backend
|
||||
.get(&key.as_raw_key())
|
||||
.await?
|
||||
.map(|kv| DeserializedValueWithBytes::from_inner_slice(&kv.value))
|
||||
.transpose()
|
||||
) -> Result<Option<PhysicalTableRouteValue>> {
|
||||
match self.storage.get(table_id).await? {
|
||||
Some(route) => {
|
||||
ensure!(
|
||||
route.is_physical(),
|
||||
error::UnexpectedLogicalRouteTableSnafu {
|
||||
err_msg: format!("{route:?} is a non-physical TableRouteValue.")
|
||||
}
|
||||
);
|
||||
Ok(Some(route.into_physical_table_route()))
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the [TableId] recursively.
|
||||
///
|
||||
/// Returns a [TableRouteNotFound](crate::error::Error::TableRouteNotFound) Error if:
|
||||
/// - the table(`logical_or_physical_table_id`) does not exist.
|
||||
pub async fn get_physical_table_id(
|
||||
&self,
|
||||
logical_or_physical_table_id: TableId,
|
||||
) -> Result<TableId> {
|
||||
let table_route = self
|
||||
.storage
|
||||
.get(logical_or_physical_table_id)
|
||||
.await?
|
||||
.context(TableRouteNotFoundSnafu {
|
||||
table_id: logical_or_physical_table_id,
|
||||
})?
|
||||
.into_inner();
|
||||
})?;
|
||||
|
||||
match table_route {
|
||||
TableRouteValue::Physical(_) => Ok(logical_or_physical_table_id),
|
||||
@@ -325,46 +282,58 @@ impl TableRouteManager {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the [TableRouteValue::Physical] of table.
|
||||
/// Returns the [TableRouteValue::Physical] recursively.
|
||||
///
|
||||
/// Returns a [TableRouteNotFound](crate::error::Error::TableRouteNotFound) Error if:
|
||||
/// - the physical table(`logical_or_physical_table_id`) does not exists
|
||||
/// - the corresponding physical table of the logical table(`logical_or_physical_table_id`) does not exists.
|
||||
/// - the physical table(`logical_or_physical_table_id`) does not exist
|
||||
/// - the corresponding physical table of the logical table(`logical_or_physical_table_id`) does not exist.
|
||||
pub async fn get_physical_table_route(
|
||||
&self,
|
||||
logical_or_physical_table_id: TableId,
|
||||
) -> Result<(TableId, PhysicalTableRouteValue)> {
|
||||
let table_route = self
|
||||
.storage
|
||||
.get(logical_or_physical_table_id)
|
||||
.await?
|
||||
.context(TableRouteNotFoundSnafu {
|
||||
table_id: logical_or_physical_table_id,
|
||||
})?
|
||||
.into_inner();
|
||||
})?;
|
||||
|
||||
match table_route {
|
||||
TableRouteValue::Physical(x) => Ok((logical_or_physical_table_id, x)),
|
||||
TableRouteValue::Logical(x) => {
|
||||
let physical_table_id = x.physical_table_id();
|
||||
let physical_table_route =
|
||||
self.get(physical_table_id)
|
||||
.await?
|
||||
.context(TableRouteNotFoundSnafu {
|
||||
table_id: physical_table_id,
|
||||
})?;
|
||||
Ok((
|
||||
physical_table_id,
|
||||
physical_table_route.physical_table_route().clone(),
|
||||
))
|
||||
let physical_table_route = self.storage.get(physical_table_id).await?.context(
|
||||
TableRouteNotFoundSnafu {
|
||||
table_id: physical_table_id,
|
||||
},
|
||||
)?;
|
||||
let physical_table_route = physical_table_route.into_physical_table_route();
|
||||
Ok((physical_table_id, physical_table_route))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the [TableRouteValue::Physical] recursively.
|
||||
///
|
||||
/// Returns a [TableRouteNotFound](crate::error::Error::TableRouteNotFound) Error if:
|
||||
/// - one of the logical tables corresponding to the physical table does not exist.
|
||||
///
|
||||
/// **Notes**: it may return a subset of `logical_or_physical_table_ids`.
|
||||
pub async fn batch_get_physical_table_routes(
|
||||
&self,
|
||||
logical_or_physical_table_ids: &[TableId],
|
||||
) -> Result<HashMap<TableId, PhysicalTableRouteValue>> {
|
||||
let table_routes = self.batch_get(logical_or_physical_table_ids).await?;
|
||||
let table_routes = self
|
||||
.storage
|
||||
.batch_get(logical_or_physical_table_ids)
|
||||
.await?;
|
||||
// Returns a subset of `logical_or_physical_table_ids`.
|
||||
let table_routes = table_routes
|
||||
.into_iter()
|
||||
.zip(logical_or_physical_table_ids)
|
||||
.filter_map(|(route, id)| route.map(|route| (*id, route)))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
let mut physical_table_routes = HashMap::with_capacity(table_routes.len());
|
||||
let mut logical_table_ids = HashMap::with_capacity(table_routes.len());
|
||||
@@ -384,13 +353,22 @@ impl TableRouteManager {
|
||||
return Ok(physical_table_routes);
|
||||
}
|
||||
|
||||
// Finds the logical tables corresponding to the physical tables.
|
||||
let physical_table_ids = logical_table_ids
|
||||
.values()
|
||||
.cloned()
|
||||
.collect::<HashSet<_>>()
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let table_routes = self.batch_get(&physical_table_ids).await?;
|
||||
let table_routes = self
|
||||
.table_route_storage()
|
||||
.batch_get(&physical_table_ids)
|
||||
.await?;
|
||||
let table_routes = table_routes
|
||||
.into_iter()
|
||||
.zip(physical_table_ids)
|
||||
.filter_map(|(route, id)| route.map(|route| (id, route)))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
for (logical_table_id, physical_table_id) in logical_table_ids {
|
||||
let table_route =
|
||||
@@ -419,40 +397,114 @@ impl TableRouteManager {
|
||||
Ok(physical_table_routes)
|
||||
}
|
||||
|
||||
/// It may return a subset of the `table_ids`.
|
||||
pub async fn batch_get(
|
||||
/// Returns [`RegionDistribution`] of the table(`table_id`).
|
||||
pub async fn get_region_distribution(
|
||||
&self,
|
||||
table_ids: &[TableId],
|
||||
) -> Result<HashMap<TableId, TableRouteValue>> {
|
||||
let lookup_table = table_ids
|
||||
.iter()
|
||||
.map(|id| (TableRouteKey::new(*id).as_raw_key(), id))
|
||||
.collect::<HashMap<_, _>>();
|
||||
table_id: TableId,
|
||||
) -> Result<Option<RegionDistribution>> {
|
||||
self.storage
|
||||
.get(table_id)
|
||||
.await?
|
||||
.map(|table_route| Ok(region_distribution(table_route.region_routes()?)))
|
||||
.transpose()
|
||||
}
|
||||
|
||||
let resp = self
|
||||
.kv_backend
|
||||
.batch_get(BatchGetRequest {
|
||||
keys: lookup_table.keys().cloned().collect::<Vec<_>>(),
|
||||
})
|
||||
.await?;
|
||||
/// Returns low-level APIs.
|
||||
pub fn table_route_storage(&self) -> &TableRouteStorage {
|
||||
&self.storage
|
||||
}
|
||||
}
|
||||
|
||||
let values = resp
|
||||
.kvs
|
||||
.iter()
|
||||
.map(|kv| {
|
||||
Ok((
|
||||
// Safety: must exist.
|
||||
**lookup_table.get(kv.key()).unwrap(),
|
||||
TableRouteValue::try_from_raw_value(&kv.value)?,
|
||||
))
|
||||
})
|
||||
.collect::<Result<HashMap<_, _>>>()?;
|
||||
/// Low-level operations of [TableRouteValue].
|
||||
pub struct TableRouteStorage {
|
||||
kv_backend: KvBackendRef,
|
||||
}
|
||||
|
||||
Ok(values)
|
||||
impl TableRouteStorage {
|
||||
pub fn new(kv_backend: KvBackendRef) -> Self {
|
||||
Self { kv_backend }
|
||||
}
|
||||
|
||||
/// Builds a get table route transaction(readonly).
|
||||
pub(crate) fn build_get_txn(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
) -> (
|
||||
Txn,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
|
||||
) {
|
||||
let key = TableRouteKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
let txn = Txn::new().and_then(vec![TxnOp::Get(raw_key.clone())]);
|
||||
|
||||
(txn, txn_helper::build_txn_response_decoder_fn(raw_key))
|
||||
}
|
||||
|
||||
/// Builds a create table route transaction;
/// it expects the `__table_route/{table_id}` key to be unoccupied.
|
||||
pub fn build_create_txn(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
table_route_value: &TableRouteValue,
|
||||
) -> Result<(
|
||||
Txn,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
|
||||
)> {
|
||||
let key = TableRouteKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
|
||||
let txn = txn_helper::build_put_if_absent_txn(
|
||||
raw_key.clone(),
|
||||
table_route_value.try_as_raw_value()?,
|
||||
);
|
||||
|
||||
Ok((txn, txn_helper::build_txn_response_decoder_fn(raw_key)))
|
||||
}
|
||||
|
||||
/// Builds an update table route transaction;
/// it expects the remote value to equal the `current_table_route_value`.
/// It retrieves the latest value if the comparison fails.
|
||||
pub(crate) fn build_update_txn(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
current_table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
|
||||
new_table_route_value: &TableRouteValue,
|
||||
) -> Result<(
|
||||
Txn,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
|
||||
)> {
|
||||
let key = TableRouteKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
let raw_value = current_table_route_value.get_raw_bytes();
|
||||
let new_raw_value: Vec<u8> = new_table_route_value.try_as_raw_value()?;
|
||||
|
||||
let txn = txn_helper::build_compare_and_put_txn(raw_key.clone(), raw_value, new_raw_value);
|
||||
|
||||
Ok((txn, txn_helper::build_txn_response_decoder_fn(raw_key)))
|
||||
}
|
||||
|
||||
/// Builds a delete table route transaction;
/// it expects the remote value to equal the `table_route_value`.
|
||||
pub(crate) fn build_delete_txn(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
|
||||
) -> Result<Txn> {
|
||||
let key = TableRouteKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
let raw_value = table_route_value.get_raw_bytes();
|
||||
let removed_key = to_removed_key(&String::from_utf8_lossy(&raw_key));
|
||||
|
||||
let txn = Txn::new().and_then(vec![
|
||||
TxnOp::Delete(raw_key),
|
||||
TxnOp::Put(removed_key.into_bytes(), raw_value),
|
||||
]);
|
||||
|
||||
Ok(txn)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub async fn get_removed(
|
||||
pub async fn get_raw_removed(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>> {
|
||||
@@ -465,20 +517,64 @@ impl TableRouteManager {
|
||||
.transpose()
|
||||
}
|
||||
|
||||
pub async fn get_region_distribution(
|
||||
/// Returns the [`TableRouteValue`].
|
||||
pub async fn get(&self, table_id: TableId) -> Result<Option<TableRouteValue>> {
|
||||
let key = TableRouteKey::new(table_id);
|
||||
self.kv_backend
|
||||
.get(&key.as_raw_key())
|
||||
.await?
|
||||
.map(|kv| TableRouteValue::try_from_raw_value(&kv.value))
|
||||
.transpose()
|
||||
}
|
||||
|
||||
/// Returns the [`TableRouteValue`] wrapped with [`DeserializedValueWithBytes`].
|
||||
pub async fn get_raw(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
) -> Result<Option<RegionDistribution>> {
|
||||
self.get(table_id)
|
||||
) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>> {
|
||||
let key = TableRouteKey::new(table_id);
|
||||
self.kv_backend
|
||||
.get(&key.as_raw_key())
|
||||
.await?
|
||||
.map(|table_route| Ok(region_distribution(table_route.region_routes()?)))
|
||||
.map(|kv| DeserializedValueWithBytes::from_inner_slice(&kv.value))
|
||||
.transpose()
|
||||
}
|
||||
|
||||
/// Returns batch of [`TableRouteValue`] that respects the order of `table_ids`.
|
||||
pub async fn batch_get(&self, table_ids: &[TableId]) -> Result<Vec<Option<TableRouteValue>>> {
|
||||
let keys = table_ids
|
||||
.iter()
|
||||
.map(|id| TableRouteKey::new(*id).as_raw_key())
|
||||
.collect::<Vec<_>>();
|
||||
let resp = self
|
||||
.kv_backend
|
||||
.batch_get(BatchGetRequest { keys: keys.clone() })
|
||||
.await?;
|
||||
|
||||
let kvs = resp
|
||||
.kvs
|
||||
.into_iter()
|
||||
.map(|kv| (kv.key, kv.value))
|
||||
.collect::<HashMap<_, _>>();
|
||||
keys.into_iter()
|
||||
.map(|key| {
|
||||
if let Some(value) = kvs.get(&key) {
|
||||
Ok(Some(TableRouteValue::try_from_raw_value(value)?))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
})
|
||||
.collect::<Result<Vec<_>>>()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::*;
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
use crate::kv_backend::TxnService;
|
||||
|
||||
#[test]
|
||||
fn test_table_route_compatibility() {
|
||||
@@ -491,4 +587,81 @@ mod tests {
|
||||
r#"Physical(PhysicalTableRouteValue { region_routes: [RegionRoute { region: Region { id: 1(0, 1), name: "r1", partition: None, attrs: {} }, leader_peer: Some(Peer { id: 2, addr: "a2" }), follower_peers: [], leader_status: None, leader_down_since: None }, RegionRoute { region: Region { id: 1(0, 1), name: "r1", partition: None, attrs: {} }, leader_peer: Some(Peer { id: 2, addr: "a2" }), follower_peers: [], leader_status: None, leader_down_since: None }], version: 0 })"#
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_table_route_storage_get_raw_empty() {
|
||||
let kv = Arc::new(MemoryKvBackend::default());
|
||||
let table_route_storage = TableRouteStorage::new(kv);
|
||||
let table_route = table_route_storage.get_raw(1024).await.unwrap();
|
||||
assert!(table_route.is_none());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_table_route_storage_get_raw() {
|
||||
let kv = Arc::new(MemoryKvBackend::default());
|
||||
let table_route_storage = TableRouteStorage::new(kv.clone());
|
||||
let table_route = table_route_storage.get_raw(1024).await.unwrap();
|
||||
assert!(table_route.is_none());
|
||||
let table_route_manager = TableRouteManager::new(kv.clone());
|
||||
let table_route_value = TableRouteValue::Logical(LogicalTableRouteValue {
|
||||
physical_table_id: 1023,
|
||||
region_ids: vec![RegionId::new(1023, 1)],
|
||||
});
|
||||
let (txn, _) = table_route_manager
|
||||
.table_route_storage()
|
||||
.build_create_txn(1024, &table_route_value)
|
||||
.unwrap();
|
||||
let r = kv.txn(txn).await.unwrap();
|
||||
assert!(r.succeeded);
|
||||
let table_route = table_route_storage.get_raw(1024).await.unwrap();
|
||||
assert!(table_route.is_some());
|
||||
let got = table_route.unwrap().inner;
|
||||
assert_eq!(got, table_route_value);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_table_route_batch_get() {
|
||||
let kv = Arc::new(MemoryKvBackend::default());
|
||||
let table_route_storage = TableRouteStorage::new(kv.clone());
|
||||
let routes = table_route_storage
|
||||
.batch_get(&[1023, 1024, 1025])
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert!(routes.iter().all(Option::is_none));
|
||||
let table_route_manager = TableRouteManager::new(kv.clone());
|
||||
let routes = [
|
||||
(
|
||||
1024,
|
||||
TableRouteValue::Logical(LogicalTableRouteValue {
|
||||
physical_table_id: 1023,
|
||||
region_ids: vec![RegionId::new(1023, 1)],
|
||||
}),
|
||||
),
|
||||
(
|
||||
1025,
|
||||
TableRouteValue::Logical(LogicalTableRouteValue {
|
||||
physical_table_id: 1023,
|
||||
region_ids: vec![RegionId::new(1023, 2)],
|
||||
}),
|
||||
),
|
||||
];
|
||||
for (table_id, route) in &routes {
|
||||
let (txn, _) = table_route_manager
|
||||
.table_route_storage()
|
||||
.build_create_txn(*table_id, route)
|
||||
.unwrap();
|
||||
let r = kv.txn(txn).await.unwrap();
|
||||
assert!(r.succeeded);
|
||||
}
|
||||
|
||||
let results = table_route_storage
|
||||
.batch_get(&[9999, 1025, 8888, 1024])
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(results[0].is_none());
|
||||
assert_eq!(results[1].as_ref().unwrap(), &routes[1].1);
|
||||
assert!(results[2].is_none());
|
||||
assert_eq!(results[3].as_ref().unwrap(), &routes[0].1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,8 +19,9 @@ use datatypes::schema::{ColumnSchema, SchemaBuilder};
|
||||
use store_api::storage::TableId;
|
||||
use table::metadata::{TableInfo, TableInfoBuilder, TableMetaBuilder};
|
||||
|
||||
pub fn new_test_table_info<I: IntoIterator<Item = u32>>(
|
||||
pub fn new_test_table_info_with_name<I: IntoIterator<Item = u32>>(
|
||||
table_id: TableId,
|
||||
table_name: &str,
|
||||
region_numbers: I,
|
||||
) -> TableInfo {
|
||||
let column_schemas = vec![
|
||||
@@ -50,8 +51,14 @@ pub fn new_test_table_info<I: IntoIterator<Item = u32>>(
|
||||
TableInfoBuilder::default()
|
||||
.table_id(table_id)
|
||||
.table_version(5)
|
||||
.name("mytable")
|
||||
.name(table_name)
|
||||
.meta(meta)
|
||||
.build()
|
||||
.unwrap()
|
||||
}
|
||||
pub fn new_test_table_info<I: IntoIterator<Item = u32>>(
|
||||
table_id: TableId,
|
||||
region_numbers: I,
|
||||
) -> TableInfo {
|
||||
new_test_table_info_with_name(table_id, "mytable", region_numbers)
|
||||
}
|
||||
|
||||
@@ -45,6 +45,10 @@ impl TxnService for ChrootKvBackend {
|
||||
let txn_res = self.inner.txn(txn).await?;
|
||||
Ok(self.chroot_txn_response(txn_res))
|
||||
}
|
||||
|
||||
fn max_txn_ops(&self) -> usize {
|
||||
self.inner.max_txn_ops()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
|
||||
@@ -33,12 +33,6 @@ use crate::rpc::store::{
|
||||
};
|
||||
use crate::rpc::KeyValue;
|
||||
|
||||
// Maximum number of operations permitted in a transaction.
|
||||
// The etcd default configuration's `--max-txn-ops` is 128.
|
||||
//
|
||||
// For more detail, see: https://etcd.io/docs/v3.5/op-guide/configuration/
|
||||
const MAX_TXN_SIZE: usize = 128;
|
||||
|
||||
fn convert_key_value(kv: etcd_client::KeyValue) -> KeyValue {
|
||||
let (key, value) = kv.into_key_value();
|
||||
KeyValue { key, value }
|
||||
@@ -46,10 +40,15 @@ fn convert_key_value(kv: etcd_client::KeyValue) -> KeyValue {
|
||||
|
||||
pub struct EtcdStore {
    client: Client,
    // Maximum number of operations permitted in a transaction.
    // The etcd default configuration's `--max-txn-ops` is 128.
    //
    // For more detail, see: https://etcd.io/docs/v3.5/op-guide/configuration/
    max_txn_ops: usize,
}

impl EtcdStore {
|
||||
pub async fn with_endpoints<E, S>(endpoints: S) -> Result<KvBackendRef>
|
||||
pub async fn with_endpoints<E, S>(endpoints: S, max_txn_ops: usize) -> Result<KvBackendRef>
|
||||
where
|
||||
E: AsRef<str>,
|
||||
S: AsRef<[E]>,
|
||||
@@ -58,16 +57,19 @@ impl EtcdStore {
|
||||
.await
|
||||
.context(error::ConnectEtcdSnafu)?;
|
||||
|
||||
Ok(Self::with_etcd_client(client))
|
||||
Ok(Self::with_etcd_client(client, max_txn_ops))
|
||||
}
|
||||
|
||||
pub fn with_etcd_client(client: Client) -> KvBackendRef {
|
||||
Arc::new(Self { client })
|
||||
pub fn with_etcd_client(client: Client, max_txn_ops: usize) -> KvBackendRef {
|
||||
Arc::new(Self {
|
||||
client,
|
||||
max_txn_ops,
|
||||
})
|
||||
}
|
||||
|
||||
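A hedged sketch of a call site for the updated constructor; the endpoint address and the limit of 128 are illustrative values, not taken from this change:

// Pass the transaction-operation limit explicitly now that it is a struct field
// rather than a hard-coded constant; 128 matches etcd's default `--max-txn-ops`.
let kv_backend = EtcdStore::with_endpoints(["127.0.0.1:2379"], 128).await?;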
async fn do_multi_txn(&self, txn_ops: Vec<TxnOp>) -> Result<Vec<TxnResponse>> {
|
||||
let max_txn_size = self.max_txn_size();
|
||||
if txn_ops.len() < max_txn_size {
|
||||
let max_txn_ops = self.max_txn_ops();
|
||||
if txn_ops.len() < max_txn_ops {
|
||||
// fast path
|
||||
let _timer = METRIC_META_TXN_REQUEST
|
||||
.with_label_values(&["etcd", "txn"])
|
||||
@@ -83,7 +85,7 @@ impl EtcdStore {
|
||||
}
|
||||
|
||||
let txns = txn_ops
|
||||
.chunks(max_txn_size)
|
||||
.chunks(max_txn_ops)
|
||||
.map(|part| async move {
|
||||
let _timer = METRIC_META_TXN_REQUEST
|
||||
.with_label_values(&["etcd", "txn"])
|
||||
@@ -311,18 +313,20 @@ impl TxnService for EtcdStore {
|
||||
.with_label_values(&["etcd", "txn"])
|
||||
.start_timer();
|
||||
|
||||
let max_operations = txn.max_operations();
|
||||
|
||||
let etcd_txn: Txn = txn.into();
|
||||
let txn_res = self
|
||||
.client
|
||||
.kv_client()
|
||||
.txn(etcd_txn)
|
||||
.await
|
||||
.context(error::EtcdFailedSnafu)?;
|
||||
.context(error::EtcdTxnFailedSnafu { max_operations })?;
|
||||
txn_res.try_into()
|
||||
}
|
||||
|
||||
fn max_txn_size(&self) -> usize {
|
||||
MAX_TXN_SIZE
|
||||
fn max_txn_ops(&self) -> usize {
|
||||
self.max_txn_ops
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -323,6 +323,10 @@ impl<T: ErrorExt + Send + Sync> TxnService for MemoryKvBackend<T> {
|
||||
responses,
|
||||
})
|
||||
}
|
||||
|
||||
fn max_txn_ops(&self) -> usize {
|
||||
usize::MAX
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ErrorExt + Send + Sync + 'static> ResettableKvBackend for MemoryKvBackend<T> {
|
||||
|
||||
@@ -12,6 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::cmp::max;
|
||||
|
||||
use common_error::ext::ErrorExt;
|
||||
|
||||
use crate::rpc::store::{DeleteRangeResponse, PutResponse, RangeResponse};
|
||||
@@ -27,8 +29,8 @@ pub trait TxnService: Sync + Send {
|
||||
}
|
||||
|
||||
/// Maximum number of operations permitted in a transaction.
|
||||
fn max_txn_size(&self) -> usize {
|
||||
usize::MAX
|
||||
fn max_txn_ops(&self) -> usize {
|
||||
unimplemented!("txn is not implemented")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -192,6 +194,12 @@ impl Txn {
        self.req.failure = operations.into();
        self
    }

    #[inline]
    pub fn max_operations(&self) -> usize {
        let opc = max(self.req.compare.len(), self.req.success.len());
        max(opc, self.req.failure.len())
    }
}

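As a worked example of the new helper: a transaction that compares one key, puts two keys on success, and deletes three keys on failure reports `max_operations() == 3`, which is the figure the etcd backend now records in its `EtcdTxnFailed` error context.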
impl From<Txn> for TxnRequest {
|
||||
|
||||
@@ -34,10 +34,14 @@ pub struct SequenceBuilder {
|
||||
max: u64,
|
||||
}
|
||||
|
||||
fn seq_name(name: impl AsRef<str>) -> String {
|
||||
format!("{}-{}", SEQ_PREFIX, name.as_ref())
|
||||
}
|
||||
|
||||
impl SequenceBuilder {
|
||||
pub fn new(name: impl AsRef<str>, generator: KvBackendRef) -> Self {
|
||||
Self {
|
||||
name: format!("{}-{}", SEQ_PREFIX, name.as_ref()),
|
||||
name: seq_name(name),
|
||||
initial: 0,
|
||||
step: 1,
|
||||
generator,
|
||||
@@ -138,13 +142,14 @@ impl Inner {
|
||||
pub async fn next_range(&self) -> Result<Range<u64>> {
|
||||
let key = self.name.as_bytes();
|
||||
let mut start = self.next;
|
||||
for _ in 0..self.force_quit {
|
||||
let expect = if start == self.initial {
|
||||
vec![]
|
||||
} else {
|
||||
u64::to_le_bytes(start).to_vec()
|
||||
};
|
||||
|
||||
let mut expect = if start == self.initial {
|
||||
vec![]
|
||||
} else {
|
||||
u64::to_le_bytes(start).to_vec()
|
||||
};
|
||||
|
||||
for _ in 0..self.force_quit {
|
||||
let step = self.step.min(self.max - start);
|
||||
|
||||
ensure!(
|
||||
@@ -167,15 +172,24 @@ impl Inner {
|
||||
|
||||
if !res.success {
|
||||
if let Some(kv) = res.prev_kv {
|
||||
let value = kv.value;
|
||||
ensure!(
|
||||
value.len() == std::mem::size_of::<u64>(),
|
||||
error::UnexpectedSequenceValueSnafu {
|
||||
err_msg: format!("key={}, unexpected value={:?}", self.name, value)
|
||||
expect = kv.value.clone();
|
||||
|
||||
let v: [u8; 8] = match kv.value.try_into() {
|
||||
Ok(a) => a,
|
||||
Err(v) => {
|
||||
return error::UnexpectedSequenceValueSnafu {
|
||||
err_msg: format!("Not a valid u64 for '{}': {v:?}", self.name),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
);
|
||||
start = u64::from_le_bytes(value.try_into().unwrap());
|
||||
};
|
||||
let v = u64::from_le_bytes(v);
|
||||
|
||||
// If the existing value is smaller than the initial, we should start from the initial.
|
||||
start = v.max(self.initial);
|
||||
} else {
|
||||
expect = vec![];
|
||||
|
||||
start = self.initial;
|
||||
}
|
||||
continue;
|
||||
@@ -197,8 +211,12 @@ impl Inner {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::any::Any;
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
|
||||
use itertools::{Itertools, MinMaxResult};
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use super::*;
|
||||
use crate::error::Error;
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
@@ -209,6 +227,76 @@ mod tests {
|
||||
DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
|
||||
};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_sequence_with_existed_value() {
|
||||
async fn test(exist: u64, expected: Vec<u64>) {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::default());
|
||||
|
||||
let exist = u64::to_le_bytes(exist);
|
||||
kv_backend
|
||||
.put(PutRequest::new().with_key(seq_name("s")).with_value(exist))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let initial = 100;
|
||||
let seq = SequenceBuilder::new("s", kv_backend)
|
||||
.initial(initial)
|
||||
.build();
|
||||
|
||||
let mut actual = Vec::with_capacity(expected.len());
|
||||
for _ in 0..expected.len() {
|
||||
actual.push(seq.next().await.unwrap());
|
||||
}
|
||||
assert_eq!(actual, expected);
|
||||
}
|
||||
|
||||
// put a value not greater than the "initial", the sequence should start from "initial"
|
||||
test(1, vec![100, 101, 102]).await;
|
||||
test(100, vec![100, 101, 102]).await;
|
||||
|
||||
// put a value greater than the "initial", the sequence should start from the put value
|
||||
test(200, vec![200, 201, 202]).await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_sequence_with_contention() {
|
||||
let seq = Arc::new(
|
||||
SequenceBuilder::new("s", Arc::new(MemoryKvBackend::default()))
|
||||
.initial(1024)
|
||||
.build(),
|
||||
);
|
||||
|
||||
let (tx, mut rx) = mpsc::unbounded_channel();
|
||||
// Spawn 10 tasks to concurrently get the next sequence. Each task will get 100 sequences.
|
||||
for _ in 0..10 {
|
||||
tokio::spawn({
|
||||
let seq = seq.clone();
|
||||
let tx = tx.clone();
|
||||
async move {
|
||||
for _ in 0..100 {
|
||||
tx.send(seq.next().await.unwrap()).unwrap()
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Test that we get 1000 unique sequences, and start from 1024 to 2023.
|
||||
let mut nums = HashSet::new();
|
||||
let mut c = 0;
|
||||
while c < 1000
|
||||
&& let Some(x) = rx.recv().await
|
||||
{
|
||||
nums.insert(x);
|
||||
c += 1;
|
||||
}
|
||||
assert_eq!(nums.len(), 1000);
|
||||
let MinMaxResult::MinMax(min, max) = nums.iter().minmax() else {
|
||||
unreachable!("nums has more than one elements");
|
||||
};
|
||||
assert_eq!(*min, 1024);
|
||||
assert_eq!(*max, 2023);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_sequence() {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::default());
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::region::{QueryRequest, RegionRequest};
|
||||
pub use common_base::AffectedRows;
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
|
||||
use crate::cache_invalidator::DummyCacheInvalidator;
|
||||
@@ -29,8 +30,6 @@ use crate::region_keeper::MemoryRegionKeeper;
|
||||
use crate::sequence::SequenceBuilder;
|
||||
use crate::wal_options_allocator::WalOptionsAllocator;
|
||||
|
||||
pub type AffectedRows = u64;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait MockDatanodeHandler: Sync + Send + Clone {
|
||||
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<AffectedRows>;
|
||||
|
||||
@@ -30,7 +30,7 @@ use snafu::{ensure, ResultExt};
|
||||
use crate::error::{
|
||||
BuildKafkaClientSnafu, BuildKafkaCtrlClientSnafu, BuildKafkaPartitionClientSnafu,
|
||||
CreateKafkaWalTopicSnafu, DecodeJsonSnafu, EncodeJsonSnafu, InvalidNumTopicsSnafu,
|
||||
ProduceRecordSnafu, Result,
|
||||
ProduceRecordSnafu, ResolveKafkaEndpointSnafu, Result,
|
||||
};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::PutRequest;
|
||||
@@ -117,7 +117,10 @@ impl TopicManager {
|
||||
base: self.config.backoff.base as f64,
|
||||
deadline: self.config.backoff.deadline,
|
||||
};
|
||||
let client = ClientBuilder::new(self.config.broker_endpoints.clone())
|
||||
let broker_endpoints = common_wal::resolve_to_ipv4(&self.config.broker_endpoints)
|
||||
.await
|
||||
.context(ResolveKafkaEndpointSnafu)?;
|
||||
let client = ClientBuilder::new(broker_endpoints)
|
||||
.backoff_config(backoff_config)
|
||||
.build()
|
||||
.await
|
||||
|
||||
@@ -152,7 +152,7 @@ impl Runner {
|
||||
guard.key_guards.push(key_guard);
|
||||
}
|
||||
|
||||
// Execute the procedure. We need to release the lock whenever the the execution
|
||||
// Execute the procedure. We need to release the lock whenever the execution
|
||||
// is successful or fail.
|
||||
self.execute_procedure_in_loop().await;
|
||||
|
||||
|
||||
@@ -30,38 +30,87 @@ pub mod prelude;
|
||||
mod signature;
|
||||
use sqlparser_derive::{Visit, VisitMut};
|
||||
|
||||
// sql output
|
||||
pub enum Output {
|
||||
/// New `Output` struct with output data (previously the `Output` enum) and output meta.
|
||||
#[derive(Debug)]
|
||||
pub struct Output {
|
||||
pub data: OutputData,
|
||||
pub meta: OutputMeta,
|
||||
}
|
||||
|
||||
/// Original Output struct,
/// carrying result data to the response/client/user interface.
|
||||
pub enum OutputData {
|
||||
AffectedRows(usize),
|
||||
RecordBatches(RecordBatches),
|
||||
Stream(SendableRecordBatchStream, Option<Arc<dyn PhysicalPlan>>),
|
||||
Stream(SendableRecordBatchStream),
|
||||
}
|
||||
|
||||
/// OutputMeta stores meta information produced during execution.
|
||||
#[derive(Debug, Default)]
|
||||
pub struct OutputMeta {
|
||||
/// May exist for query output. One can retrieve execution metrics from this plan.
|
||||
pub plan: Option<Arc<dyn PhysicalPlan>>,
|
||||
pub cost: usize,
|
||||
}
|
||||
|
||||
impl Output {
|
||||
// helper function to build original `Output::Stream`
|
||||
pub fn new_stream(stream: SendableRecordBatchStream) -> Self {
|
||||
Output::Stream(stream, None)
|
||||
pub fn new_with_affected_rows(affected_rows: usize) -> Self {
|
||||
Self {
|
||||
data: OutputData::AffectedRows(affected_rows),
|
||||
meta: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_with_record_batches(recordbatches: RecordBatches) -> Self {
|
||||
Self {
|
||||
data: OutputData::RecordBatches(recordbatches),
|
||||
meta: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_with_stream(stream: SendableRecordBatchStream) -> Self {
|
||||
Self {
|
||||
data: OutputData::Stream(stream),
|
||||
meta: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new(data: OutputData, meta: OutputMeta) -> Self {
|
||||
Self { data, meta }
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for Output {
|
||||
impl Debug for OutputData {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Output::AffectedRows(rows) => write!(f, "Output::AffectedRows({rows})"),
|
||||
Output::RecordBatches(recordbatches) => {
|
||||
write!(f, "Output::RecordBatches({recordbatches:?})")
|
||||
OutputData::AffectedRows(rows) => write!(f, "OutputData::AffectedRows({rows})"),
|
||||
OutputData::RecordBatches(recordbatches) => {
|
||||
write!(f, "OutputData::RecordBatches({recordbatches:?})")
|
||||
}
|
||||
Output::Stream(_, df) => {
|
||||
if df.is_some() {
|
||||
write!(f, "Output::Stream(<stream>, Some<physical_plan>)")
|
||||
} else {
|
||||
write!(f, "Output::Stream(<stream>)")
|
||||
}
|
||||
OutputData::Stream(_) => {
|
||||
write!(f, "OutputData::Stream(<stream>)")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl OutputMeta {
|
||||
pub fn new(plan: Option<Arc<dyn PhysicalPlan>>, cost: usize) -> Self {
|
||||
Self { plan, cost }
|
||||
}
|
||||
|
||||
pub fn new_with_plan(plan: Arc<dyn PhysicalPlan>) -> Self {
|
||||
Self {
|
||||
plan: Some(plan),
|
||||
cost: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_with_cost(cost: usize) -> Self {
|
||||
Self { plan: None, cost }
|
||||
}
|
||||
}
|
||||
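A hedged usage sketch of the reworked `Output`; `stream` and `physical_plan` are assumed to already be in the caller's scope, and the match mirrors how callers previously matched on the old `Output` enum:

// Build an Output carrying a stream plus the physical plan used for metrics,
// then branch on the data variant.
let output = Output::new(OutputData::Stream(stream), OutputMeta::new_with_plan(physical_plan));
match output.data {
    OutputData::AffectedRows(rows) => println!("affected rows: {rows}"),
    OutputData::RecordBatches(batches) => println!("{batches:?}"),
    OutputData::Stream(_stream) => { /* poll the stream elsewhere */ }
}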
|
||||
pub use datafusion::physical_plan::ExecutionPlan as DfPhysicalPlan;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Visit, VisitMut)]
|
||||
|
||||
@@ -32,7 +32,7 @@ use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::{
|
||||
DfRecordBatch, DfSendableRecordBatchStream, RecordBatch, RecordBatchStream,
|
||||
DfRecordBatch, DfSendableRecordBatchStream, OrderOption, RecordBatch, RecordBatchStream,
|
||||
SendableRecordBatchStream, Stream,
|
||||
};
|
||||
|
||||
@@ -228,6 +228,10 @@ impl RecordBatchStream for RecordBatchStreamAdapter {
|
||||
Metrics::Unavailable | Metrics::Unresolved(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn output_ordering(&self) -> Option<&[OrderOption]> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for RecordBatchStreamAdapter {
|
||||
@@ -316,6 +320,14 @@ impl RecordBatchStream for AsyncRecordBatchStreamAdapter {
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn output_ordering(&self) -> Option<&[OrderOption]> {
|
||||
None
|
||||
}
|
||||
|
||||
fn metrics(&self) -> Option<RecordBatchMetrics> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for AsyncRecordBatchStreamAdapter {
|
||||
@@ -375,6 +387,14 @@ mod test {
|
||||
fn schema(&self) -> SchemaRef {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn output_ordering(&self) -> Option<&[OrderOption]> {
|
||||
None
|
||||
}
|
||||
|
||||
fn metrics(&self) -> Option<RecordBatchMetrics> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for MaybeErrorRecordBatchStream {
|
||||
|
||||
@@ -39,13 +39,9 @@ use snafu::{ensure, ResultExt};
|
||||
pub trait RecordBatchStream: Stream<Item = Result<RecordBatch>> {
|
||||
fn schema(&self) -> SchemaRef;
|
||||
|
||||
fn output_ordering(&self) -> Option<&[OrderOption]> {
|
||||
None
|
||||
}
|
||||
fn output_ordering(&self) -> Option<&[OrderOption]>;
|
||||
|
||||
fn metrics(&self) -> Option<RecordBatchMetrics> {
|
||||
None
|
||||
}
|
||||
fn metrics(&self) -> Option<RecordBatchMetrics>;
|
||||
}
|
||||
|
||||
pub type SendableRecordBatchStream = Pin<Box<dyn RecordBatchStream + Send>>;
|
||||
@@ -74,6 +70,14 @@ impl RecordBatchStream for EmptyRecordBatchStream {
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn output_ordering(&self) -> Option<&[OrderOption]> {
|
||||
None
|
||||
}
|
||||
|
||||
fn metrics(&self) -> Option<RecordBatchMetrics> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for EmptyRecordBatchStream {
|
||||
@@ -192,6 +196,14 @@ impl RecordBatchStream for SimpleRecordBatchStream {
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.inner.schema()
|
||||
}
|
||||
|
||||
fn output_ordering(&self) -> Option<&[OrderOption]> {
|
||||
None
|
||||
}
|
||||
|
||||
fn metrics(&self) -> Option<RecordBatchMetrics> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for SimpleRecordBatchStream {
|
||||
|
||||
@@ -41,7 +41,8 @@ mod tests {
|
||||
use futures::Stream;
|
||||
|
||||
use super::*;
|
||||
use crate::RecordBatchStream;
|
||||
use crate::adapter::RecordBatchMetrics;
|
||||
use crate::{OrderOption, RecordBatchStream};
|
||||
|
||||
struct MockRecordBatchStream {
|
||||
batch: Option<RecordBatch>,
|
||||
@@ -52,6 +53,14 @@ mod tests {
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn output_ordering(&self) -> Option<&[OrderOption]> {
|
||||
None
|
||||
}
|
||||
|
||||
fn metrics(&self) -> Option<RecordBatchMetrics> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for MockRecordBatchStream {
|
||||
|
||||
@@ -12,11 +12,14 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![feature(let_chains)]
|
||||
|
||||
pub mod logging;
|
||||
mod macros;
|
||||
pub mod metric;
|
||||
mod panic_hook;
|
||||
pub mod tracing_context;
|
||||
mod tracing_sampler;
|
||||
|
||||
pub use logging::{init_default_ut_logging, init_global_logging};
|
||||
pub use metric::dump_metrics;
|
||||
|
||||
@@ -31,6 +31,7 @@ use tracing_subscriber::layer::SubscriberExt;
|
||||
use tracing_subscriber::prelude::*;
|
||||
use tracing_subscriber::{filter, EnvFilter, Registry};
|
||||
|
||||
use crate::tracing_sampler::{create_sampler, TracingSampleOptions};
|
||||
pub use crate::{debug, error, info, trace, warn};
|
||||
|
||||
const DEFAULT_OTLP_ENDPOINT: &str = "http://localhost:4317";
|
||||
@@ -42,7 +43,7 @@ pub struct LoggingOptions {
|
||||
pub level: Option<String>,
|
||||
pub enable_otlp_tracing: bool,
|
||||
pub otlp_endpoint: Option<String>,
|
||||
pub tracing_sample_ratio: Option<f64>,
|
||||
pub tracing_sample_ratio: Option<TracingSampleOptions>,
|
||||
pub append_stdout: bool,
|
||||
}
|
||||
|
||||
@@ -176,8 +177,10 @@ pub fn init_global_logging(
|
||||
.expect("error parsing log level string");
|
||||
let sampler = opts
|
||||
.tracing_sample_ratio
|
||||
.map(Sampler::TraceIdRatioBased)
|
||||
.unwrap_or(Sampler::AlwaysOn);
|
||||
.as_ref()
|
||||
.map(create_sampler)
|
||||
.map(Sampler::ParentBased)
|
||||
.unwrap_or(Sampler::ParentBased(Box::new(Sampler::AlwaysOn)));
|
||||
// Must enable 'tokio_unstable' cfg to use this feature.
|
||||
// For example: `RUSTFLAGS="--cfg tokio_unstable" cargo run -F common-telemetry/console -- standalone start`
|
||||
#[cfg(feature = "tokio-console")]
|
||||
|
||||
176
src/common/telemetry/src/tracing_sampler.rs
Normal file
@@ -0,0 +1,176 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashSet;
|
||||
|
||||
use opentelemetry::trace::{
|
||||
Link, SamplingDecision, SamplingResult, SpanKind, TraceContextExt, TraceId, TraceState,
|
||||
};
|
||||
use opentelemetry::KeyValue;
|
||||
use opentelemetry_sdk::trace::{Sampler, ShouldSample};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct TracingSampleOptions {
|
||||
pub default_ratio: f64,
|
||||
pub rules: Vec<TracingSampleRule>,
|
||||
}
|
||||
|
||||
impl Default for TracingSampleOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
default_ratio: 1.0,
|
||||
rules: vec![],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Determine the sampling rate of a span according to the `rules` provided in `TracingSampleOptions`.
|
||||
/// For spans that do not hit any `rules`, the `default_ratio` is used.
|
||||
#[derive(Clone, Default, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct TracingSampleRule {
|
||||
pub protocol: String,
|
||||
pub request_types: HashSet<String>,
|
||||
pub ratio: f64,
|
||||
}
|
||||
|
||||
impl TracingSampleRule {
|
||||
pub fn match_rule(&self, protocol: &str, request_type: Option<&str>) -> Option<f64> {
|
||||
if protocol == self.protocol {
|
||||
if self.request_types.is_empty() {
|
||||
Some(self.ratio)
|
||||
} else if let Some(t) = request_type
|
||||
&& self.request_types.contains(t)
|
||||
{
|
||||
Some(self.ratio)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
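To make the rule semantics concrete, here is a minimal sketch of composing the options; the "mysql"/"query" strings are illustrative assumptions, not values mandated by this code:

let opts = TracingSampleOptions {
    default_ratio: 0.1,
    rules: vec![TracingSampleRule {
        protocol: "mysql".to_string(),
        request_types: HashSet::from(["query".to_string()]),
        ratio: 1.0,
    }],
};
// Spans tagged protocol="mysql" and request_type="query" hit the rule and are always sampled,
assert_eq!(opts.rules[0].match_rule("mysql", Some("query")), Some(1.0));
// while spans that match no rule fall back to the 10% default_ratio in should_sample.
assert_eq!(opts.rules[0].match_rule("http", None), None);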
|
||||
impl PartialEq for TracingSampleOptions {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.default_ratio == other.default_ratio && self.rules == other.rules
|
||||
}
|
||||
}
|
||||
impl PartialEq for TracingSampleRule {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.protocol == other.protocol
|
||||
&& self.request_types == other.request_types
|
||||
&& self.ratio == other.ratio
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for TracingSampleOptions {}
|
||||
impl Eq for TracingSampleRule {}
|
||||
|
||||
pub fn create_sampler(opt: &TracingSampleOptions) -> Box<dyn ShouldSample> {
|
||||
if opt.rules.is_empty() {
|
||||
Box::new(Sampler::TraceIdRatioBased(opt.default_ratio))
|
||||
} else {
|
||||
Box::new(opt.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl ShouldSample for TracingSampleOptions {
|
||||
fn should_sample(
|
||||
&self,
|
||||
parent_context: Option<&opentelemetry::Context>,
|
||||
trace_id: TraceId,
|
||||
_name: &str,
|
||||
_span_kind: &SpanKind,
|
||||
attributes: &[KeyValue],
|
||||
_links: &[Link],
|
||||
) -> SamplingResult {
|
||||
let (mut protocol, mut request_type) = (None, None);
|
||||
for kv in attributes {
|
||||
match kv.key.as_str() {
|
||||
"protocol" => protocol = Some(kv.value.as_str()),
|
||||
"request_type" => request_type = Some(kv.value.as_str()),
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
let ratio = protocol
|
||||
.and_then(|p| {
|
||||
self.rules
|
||||
.iter()
|
||||
.find_map(|rule| rule.match_rule(p.as_ref(), request_type.as_deref()))
|
||||
})
|
||||
.unwrap_or(self.default_ratio);
|
||||
SamplingResult {
|
||||
decision: sample_based_on_probability(ratio, trace_id),
|
||||
// No extra attributes are ever set by the SDK samplers.
|
||||
attributes: Vec::new(),
|
||||
// All samplers in the SDK leave the trace state unchanged.
|
||||
trace_state: match parent_context {
|
||||
Some(ctx) => ctx.span().span_context().trace_state().clone(),
|
||||
None => TraceState::default(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The code here mainly refers to the relevant implementation of
|
||||
/// [opentelemetry](https://github.com/open-telemetry/opentelemetry-rust/blob/ef4701055cc39d3448d5e5392812ded00cdd4476/opentelemetry-sdk/src/trace/sampler.rs#L229),
|
||||
/// and determines whether the span needs to be collected based on the `TraceId` and sampling rate (i.e. `prob`).
|
||||
fn sample_based_on_probability(prob: f64, trace_id: TraceId) -> SamplingDecision {
|
||||
if prob >= 1.0 {
|
||||
SamplingDecision::RecordAndSample
|
||||
} else {
|
||||
let prob_upper_bound = (prob.max(0.0) * (1u64 << 63) as f64) as u64;
|
||||
let bytes = trace_id.to_bytes();
|
||||
let (_, low) = bytes.split_at(8);
|
||||
let trace_id_low = u64::from_be_bytes(low.try_into().unwrap());
|
||||
let rnd_from_trace_id = trace_id_low >> 1;
|
||||
|
||||
if rnd_from_trace_id < prob_upper_bound {
|
||||
SamplingDecision::RecordAndSample
|
||||
} else {
|
||||
SamplingDecision::Drop
|
||||
}
|
||||
}
|
||||
}
|
||||
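A standalone sketch of the same threshold arithmetic (the helper below is hypothetical and only mirrors the function above): the ratio is scaled onto the 63-bit range, and the low half of the trace id acts as the random draw.

fn keeps(prob: f64, trace_id_low: u64) -> bool {
    let prob_upper_bound = (prob.max(0.0) * (1u64 << 63) as f64) as u64;
    (trace_id_low >> 1) < prob_upper_bound
}

fn main() {
    // With prob = 0.25 the threshold is 2^61, so roughly a quarter of trace ids pass.
    assert!(keeps(0.25, u64::MAX / 8)); // low id bits fall under the threshold: sampled
    assert!(!keeps(0.25, u64::MAX / 2)); // high id bits exceed it: dropped
}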
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use std::collections::HashSet;
|
||||
|
||||
use crate::tracing_sampler::TracingSampleRule;
|
||||
|
||||
#[test]
|
||||
fn test_rule() {
|
||||
let rule = TracingSampleRule {
|
||||
protocol: "http".to_string(),
|
||||
request_types: HashSet::new(),
|
||||
ratio: 1.0,
|
||||
};
|
||||
assert_eq!(rule.match_rule("not_http", None), None);
|
||||
assert_eq!(rule.match_rule("http", None), Some(1.0));
|
||||
assert_eq!(rule.match_rule("http", Some("abc")), Some(1.0));
|
||||
let rule1 = TracingSampleRule {
|
||||
protocol: "http".to_string(),
|
||||
request_types: HashSet::from(["mysql".to_string()]),
|
||||
ratio: 1.0,
|
||||
};
|
||||
assert_eq!(rule1.match_rule("http", None), None);
|
||||
assert_eq!(rule1.match_rule("http", Some("abc")), None);
|
||||
assert_eq!(rule1.match_rule("http", Some("mysql")), Some(1.0));
|
||||
}
|
||||
}
|
||||
@@ -13,7 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use client::Database;
|
||||
use common_query::Output;
|
||||
use common_query::OutputData;
|
||||
use common_recordbatch::util;
|
||||
|
||||
pub enum ExpectedOutput<'a> {
|
||||
@@ -23,22 +23,24 @@ pub enum ExpectedOutput<'a> {
|
||||
|
||||
pub async fn execute_and_check_output(db: &Database, sql: &str, expected: ExpectedOutput<'_>) {
|
||||
let output = db.sql(sql).await.unwrap();
|
||||
let output = output.data;
|
||||
|
||||
match (&output, expected) {
|
||||
(Output::AffectedRows(x), ExpectedOutput::AffectedRows(y)) => {
|
||||
(OutputData::AffectedRows(x), ExpectedOutput::AffectedRows(y)) => {
|
||||
assert_eq!(*x, y, "actual: \n{}", x)
|
||||
}
|
||||
(Output::RecordBatches(_), ExpectedOutput::QueryResult(x))
|
||||
| (Output::Stream(_, _), ExpectedOutput::QueryResult(x)) => {
|
||||
(OutputData::RecordBatches(_), ExpectedOutput::QueryResult(x))
|
||||
| (OutputData::Stream(_), ExpectedOutput::QueryResult(x)) => {
|
||||
check_output_stream(output, x).await
|
||||
}
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn check_output_stream(output: Output, expected: &str) {
|
||||
pub async fn check_output_stream(output: OutputData, expected: &str) {
|
||||
let recordbatches = match output {
|
||||
Output::Stream(stream, _) => util::collect_batches(stream).await.unwrap(),
|
||||
Output::RecordBatches(recordbatches) => recordbatches,
|
||||
OutputData::Stream(stream) => util::collect_batches(stream).await.unwrap(),
|
||||
OutputData::RecordBatches(recordbatches) => recordbatches,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
let pretty_print = recordbatches.pretty_print().unwrap();
|
||||
|
||||
@@ -36,7 +36,7 @@ use crate::{error, Interval};
|
||||
/// - for [TimeUnit::Second]: [-262144-01-01 00:00:00, +262143-12-31 23:59:59]
|
||||
/// - for [TimeUnit::Millisecond]: [-262144-01-01 00:00:00.000, +262143-12-31 23:59:59.999]
|
||||
/// - for [TimeUnit::Microsecond]: [-262144-01-01 00:00:00.000000, +262143-12-31 23:59:59.999999]
|
||||
/// - for [TimeUnit::Nanosecond]: [1677-09-21 00:12:43.145225, 2262-04-11 23:47:16.854775807]
|
||||
/// - for [TimeUnit::Nanosecond]: [1677-09-21 00:12:43.145224192, 2262-04-11 23:47:16.854775807]
|
||||
///
|
||||
/// # Note:
|
||||
/// For values out of range, you can still store these timestamps, but while performing arithmetic
|
||||
@@ -187,28 +187,28 @@ impl Timestamp {
|
||||
Self { unit, value }
|
||||
}
|
||||
|
||||
pub fn new_second(value: i64) -> Self {
|
||||
pub const fn new_second(value: i64) -> Self {
|
||||
Self {
|
||||
value,
|
||||
unit: TimeUnit::Second,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_millisecond(value: i64) -> Self {
|
||||
pub const fn new_millisecond(value: i64) -> Self {
|
||||
Self {
|
||||
value,
|
||||
unit: TimeUnit::Millisecond,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_microsecond(value: i64) -> Self {
|
||||
pub const fn new_microsecond(value: i64) -> Self {
|
||||
Self {
|
||||
value,
|
||||
unit: TimeUnit::Microsecond,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_nanosecond(value: i64) -> Self {
|
||||
pub const fn new_nanosecond(value: i64) -> Self {
|
||||
Self {
|
||||
value,
|
||||
unit: TimeUnit::Nanosecond,
|
||||
@@ -281,8 +281,26 @@ impl Timestamp {
|
||||
.and_then(|v| v.checked_add(micros as i64))
|
||||
.map(Timestamp::new_microsecond)
|
||||
} else {
|
||||
// Refer to <https://github.com/chronotope/chrono/issues/1289>
|
||||
//
|
||||
// subsec nanos are always non-negative, however the timestamp itself (both in seconds and in nanos) can be
|
||||
// negative. Now i64::MIN is NOT dividable by 1_000_000_000, so
|
||||
//
|
||||
// (sec * 1_000_000_000) + nsec
|
||||
//
|
||||
// may underflow (even when in theory we COULD represent the datetime as i64) because we add the non-negative
|
||||
// nanos AFTER the multiplication. This is fixed by converting the negative case to
|
||||
//
|
||||
// ((sec + 1) * 1_000_000_000) + (nsec - 1_000_000_000)
|
||||
let mut sec = sec;
|
||||
let mut nsec = nsec as i64;
|
||||
if sec < 0 && nsec > 0 {
|
||||
nsec -= 1_000_000_000;
|
||||
sec += 1;
|
||||
}
|
||||
|
||||
sec.checked_mul(1_000_000_000)
|
||||
.and_then(|v| v.checked_add(nsec as i64))
|
||||
.and_then(|v| v.checked_add(nsec))
|
||||
.map(Timestamp::new_nanosecond)
|
||||
}
|
||||
}
|
||||
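A worked example of the rebalancing described in the comment above; to_nanos is a hypothetical standalone helper that mirrors the arithmetic, not an API of this crate:

fn to_nanos(mut sec: i64, mut nsec: i64) -> Option<i64> {
    if sec < 0 && nsec > 0 {
        nsec -= 1_000_000_000;
        sec += 1;
    }
    sec.checked_mul(1_000_000_000)?.checked_add(nsec)
}

fn main() {
    // -9_223_372_037 s + 145_224_192 ns is exactly i64::MIN nanoseconds.
    // Multiplying the seconds first would overflow, but the rebalanced form fits.
    assert_eq!(to_nanos(-9_223_372_037, 145_224_192), Some(i64::MIN));
}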
@@ -425,6 +443,20 @@ impl Timestamp {
|
||||
}
|
||||
}
|
||||
|
||||
impl Timestamp {
|
||||
pub const MIN_SECOND: Self = Self::new_second(-8_334_601_228_800);
|
||||
pub const MAX_SECOND: Self = Self::new_second(8_210_266_876_799);
|
||||
|
||||
pub const MIN_MILLISECOND: Self = Self::new_millisecond(-8_334_601_228_800_000);
|
||||
pub const MAX_MILLISECOND: Self = Self::new_millisecond(8_210_266_876_799_999);
|
||||
|
||||
pub const MIN_MICROSECOND: Self = Self::new_microsecond(-8_334_601_228_800_000_000);
|
||||
pub const MAX_MICROSECOND: Self = Self::new_microsecond(8_210_266_876_799_999_999);
|
||||
|
||||
pub const MIN_NANOSECOND: Self = Self::new_nanosecond(i64::MIN);
|
||||
pub const MAX_NANOSECOND: Self = Self::new_nanosecond(i64::MAX);
|
||||
}
|
||||
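The constructors are made const fn above precisely so that these bounds can be declared as associated constants; a non-const constructor cannot be called in a const item. A minimal standalone illustration with a hypothetical type:

struct Ts {
    value: i64,
}

impl Ts {
    const fn new(value: i64) -> Self {
        Self { value }
    }
    // Only compiles because `new` is a const fn.
    const MAX: Ts = Ts::new(i64::MAX);
}

fn main() {
    assert_eq!(Ts::MAX.value, i64::MAX);
}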
|
||||
/// Converts the naive datetime (which has no specific timezone) to a
|
||||
/// nanosecond epoch timestamp in UTC.
|
||||
fn naive_datetime_to_timestamp(
|
||||
@@ -586,6 +618,7 @@ impl Hash for Timestamp {
|
||||
mod tests {
|
||||
use std::collections::hash_map::DefaultHasher;
|
||||
|
||||
use chrono_tz::Tz;
|
||||
use rand::Rng;
|
||||
use serde_json::Value;
|
||||
|
||||
@@ -1297,7 +1330,7 @@ mod tests {
|
||||
"+262142-12-31 23:59:59Z",
|
||||
"+262142-12-31 23:59:59.999Z",
|
||||
"+262142-12-31 23:59:59.999999Z",
|
||||
"1677-09-21 00:12:43.145225Z",
|
||||
"1677-09-21 00:12:43.145224192Z",
|
||||
"2262-04-11 23:47:16.854775807Z",
|
||||
"+100000-01-01 00:00:01.5Z",
|
||||
];
|
||||
@@ -1306,4 +1339,47 @@ mod tests {
|
||||
Timestamp::from_str_utc(s).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_min_nanos_roundtrip() {
|
||||
let (sec, nsec) = Timestamp::MIN_NANOSECOND.split();
|
||||
let ts = Timestamp::from_splits(sec, nsec).unwrap();
|
||||
assert_eq!(Timestamp::MIN_NANOSECOND, ts);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_timestamp_bound_format() {
|
||||
assert_eq!(
|
||||
"1677-09-21 00:12:43.145224192",
|
||||
Timestamp::MIN_NANOSECOND.to_timezone_aware_string(Some(&Timezone::Named(Tz::UTC)))
|
||||
);
|
||||
assert_eq!(
|
||||
"2262-04-11 23:47:16.854775807",
|
||||
Timestamp::MAX_NANOSECOND.to_timezone_aware_string(Some(&Timezone::Named(Tz::UTC)))
|
||||
);
|
||||
assert_eq!(
|
||||
"-262143-01-01 00:00:00",
|
||||
Timestamp::MIN_MICROSECOND.to_timezone_aware_string(Some(&Timezone::Named(Tz::UTC)))
|
||||
);
|
||||
assert_eq!(
|
||||
"+262142-12-31 23:59:59.999999",
|
||||
Timestamp::MAX_MICROSECOND.to_timezone_aware_string(Some(&Timezone::Named(Tz::UTC)))
|
||||
);
|
||||
assert_eq!(
|
||||
"-262143-01-01 00:00:00",
|
||||
Timestamp::MIN_MILLISECOND.to_timezone_aware_string(Some(&Timezone::Named(Tz::UTC)))
|
||||
);
|
||||
assert_eq!(
|
||||
"+262142-12-31 23:59:59.999",
|
||||
Timestamp::MAX_MILLISECOND.to_timezone_aware_string(Some(&Timezone::Named(Tz::UTC)))
|
||||
);
|
||||
assert_eq!(
|
||||
"-262143-01-01 00:00:00",
|
||||
Timestamp::MIN_SECOND.to_timezone_aware_string(Some(&Timezone::Named(Tz::UTC)))
|
||||
);
|
||||
assert_eq!(
|
||||
"+262142-12-31 23:59:59",
|
||||
Timestamp::MAX_SECOND.to_timezone_aware_string(Some(&Timezone::Named(Tz::UTC)))
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,12 +12,16 @@ workspace = true
|
||||
|
||||
[dependencies]
|
||||
common-base.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
futures-util.workspace = true
|
||||
humantime-serde.workspace = true
|
||||
rskafka.workspace = true
|
||||
serde.workspace = true
|
||||
serde_with.workspace = true
|
||||
snafu.workspace = true
|
||||
tokio.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
serde_json.workspace = true
|
||||
|
||||
33
src/common/wal/src/error.rs
Normal file
@@ -0,0 +1,33 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::Snafu;
|
||||
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to resolve endpoint {:?}", broker_endpoint))]
|
||||
ResolveEndpoint {
|
||||
broker_endpoint: String,
|
||||
#[snafu(source)]
|
||||
error: std::io::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to find ipv4 endpoint: {:?}", broker_endpoint))]
|
||||
EndpointIPV4NotFound { broker_endpoint: String },
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -12,9 +12,17 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![feature(assert_matches)]
|
||||
|
||||
use std::net::SocketAddr;
|
||||
|
||||
use error::{EndpointIPV4NotFoundSnafu, ResolveEndpointSnafu, Result};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tokio::net;
|
||||
|
||||
pub mod config;
|
||||
pub mod error;
|
||||
pub mod options;
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
pub mod test_util;
|
||||
@@ -30,3 +38,52 @@ pub enum TopicSelectorType {
|
||||
#[default]
|
||||
RoundRobin,
|
||||
}
|
||||
|
||||
pub async fn resolve_to_ipv4<T: AsRef<str>>(endpoints: &[T]) -> Result<Vec<String>> {
|
||||
futures_util::future::try_join_all(endpoints.iter().map(resolve_to_ipv4_one)).await
|
||||
}
|
||||
|
||||
async fn resolve_to_ipv4_one<T: AsRef<str>>(endpoint: T) -> Result<String> {
|
||||
let endpoint = endpoint.as_ref();
|
||||
net::lookup_host(endpoint)
|
||||
.await
|
||||
.context(ResolveEndpointSnafu {
|
||||
broker_endpoint: endpoint,
|
||||
})?
|
||||
.find(SocketAddr::is_ipv4)
|
||||
.map(|addr| addr.to_string())
|
||||
.context(EndpointIPV4NotFoundSnafu {
|
||||
broker_endpoint: endpoint,
|
||||
})
|
||||
}
|
||||
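A minimal usage sketch of the public helper above, assuming it runs inside this crate (so `Result` is the crate's alias); the broker addresses are placeholders for a Kafka-style host:port endpoint list:

#[tokio::main]
async fn main() -> Result<()> {
    // Resolves every endpoint and keeps the first IPv4 address of each.
    let ips = resolve_to_ipv4(&["localhost:9092", "127.0.0.1:9093"]).await?;
    println!("resolved brokers: {ips:?}");
    Ok(())
}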
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::assert_matches::assert_matches;
|
||||
|
||||
use super::*;
|
||||
use crate::error::Error;
|
||||
|
||||
// tests for resolve_to_ipv4_one
|
||||
#[tokio::test]
|
||||
async fn test_valid_host() {
|
||||
let host = "localhost:9092";
|
||||
let got = resolve_to_ipv4_one(host).await;
|
||||
assert_eq!(got.unwrap(), "127.0.0.1:9092");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_valid_host_ipv6() {
|
||||
// the host is valid, it is an IPv6 address, but we only accept IPv4 addresses
|
||||
let host = "::1:9092";
|
||||
let got = resolve_to_ipv4_one(host).await;
|
||||
assert_matches!(got.unwrap_err(), Error::EndpointIPV4NotFound { .. });
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_invalid_host() {
|
||||
let host = "non-exist-host:9092";
|
||||
let got = resolve_to_ipv4_one(host).await;
|
||||
assert_matches!(got.unwrap_err(), Error::ResolveEndpoint { .. });
|
||||
}
|
||||
}
|
||||
|
||||
@@ -73,7 +73,7 @@ tokio-stream = { workspace = true, features = ["net"] }
|
||||
toml.workspace = true
|
||||
tonic.workspace = true
|
||||
tower = { version = "0.4", features = ["full"] }
|
||||
tower-http = { version = "0.3", features = ["full"] }
|
||||
tower-http = { version = "0.4", features = ["full"] }
|
||||
url = "2.3.1"
|
||||
uuid.workspace = true
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ use common_error::ext::BoxedError;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_query::logical_plan::Expr;
|
||||
use common_query::physical_plan::DfPhysicalPlanAdapter;
|
||||
use common_query::{DfPhysicalPlan, Output};
|
||||
use common_query::{DfPhysicalPlan, OutputData};
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
use common_runtime::Runtime;
|
||||
use common_telemetry::tracing::{self, info_span};
|
||||
@@ -651,11 +651,11 @@ impl RegionServerInner {
|
||||
.await
|
||||
.context(ExecuteLogicalPlanSnafu)?;
|
||||
|
||||
match result {
|
||||
Output::AffectedRows(_) | Output::RecordBatches(_) => {
|
||||
match result.data {
|
||||
OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => {
|
||||
UnsupportedOutputSnafu { expected: "stream" }.fail()
|
||||
}
|
||||
Output::Stream(stream, _) => Ok(stream),
|
||||
OutputData::Stream(stream) => Ok(stream),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -124,37 +124,45 @@ impl Display for Value {
|
||||
}
|
||||
}
|
||||
|
||||
impl Value {
|
||||
/// Returns data type of the value.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the data type is not supported.
|
||||
pub fn data_type(&self) -> ConcreteDataType {
|
||||
match self {
|
||||
Value::Null => ConcreteDataType::null_datatype(),
|
||||
Value::Boolean(_) => ConcreteDataType::boolean_datatype(),
|
||||
Value::UInt8(_) => ConcreteDataType::uint8_datatype(),
|
||||
Value::UInt16(_) => ConcreteDataType::uint16_datatype(),
|
||||
Value::UInt32(_) => ConcreteDataType::uint32_datatype(),
|
||||
Value::UInt64(_) => ConcreteDataType::uint64_datatype(),
|
||||
Value::Int8(_) => ConcreteDataType::int8_datatype(),
|
||||
Value::Int16(_) => ConcreteDataType::int16_datatype(),
|
||||
Value::Int32(_) => ConcreteDataType::int32_datatype(),
|
||||
Value::Int64(_) => ConcreteDataType::int64_datatype(),
|
||||
Value::Float32(_) => ConcreteDataType::float32_datatype(),
|
||||
Value::Float64(_) => ConcreteDataType::float64_datatype(),
|
||||
Value::String(_) => ConcreteDataType::string_datatype(),
|
||||
Value::Binary(_) => ConcreteDataType::binary_datatype(),
|
||||
Value::Date(_) => ConcreteDataType::date_datatype(),
|
||||
Value::DateTime(_) => ConcreteDataType::datetime_datatype(),
|
||||
Value::Time(t) => ConcreteDataType::time_datatype(*t.unit()),
|
||||
Value::Timestamp(v) => ConcreteDataType::timestamp_datatype(v.unit()),
|
||||
Value::Interval(v) => ConcreteDataType::interval_datatype(v.unit()),
|
||||
Value::List(list) => ConcreteDataType::list_datatype(list.datatype().clone()),
|
||||
Value::Duration(d) => ConcreteDataType::duration_datatype(d.unit()),
|
||||
Value::Decimal128(d) => ConcreteDataType::decimal128_datatype(d.precision(), d.scale()),
|
||||
macro_rules! define_data_type_func {
|
||||
($struct: ident) => {
|
||||
/// Returns data type of the value.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the data type is not supported.
|
||||
pub fn data_type(&self) -> ConcreteDataType {
|
||||
match self {
|
||||
$struct::Null => ConcreteDataType::null_datatype(),
|
||||
$struct::Boolean(_) => ConcreteDataType::boolean_datatype(),
|
||||
$struct::UInt8(_) => ConcreteDataType::uint8_datatype(),
|
||||
$struct::UInt16(_) => ConcreteDataType::uint16_datatype(),
|
||||
$struct::UInt32(_) => ConcreteDataType::uint32_datatype(),
|
||||
$struct::UInt64(_) => ConcreteDataType::uint64_datatype(),
|
||||
$struct::Int8(_) => ConcreteDataType::int8_datatype(),
|
||||
$struct::Int16(_) => ConcreteDataType::int16_datatype(),
|
||||
$struct::Int32(_) => ConcreteDataType::int32_datatype(),
|
||||
$struct::Int64(_) => ConcreteDataType::int64_datatype(),
|
||||
$struct::Float32(_) => ConcreteDataType::float32_datatype(),
|
||||
$struct::Float64(_) => ConcreteDataType::float64_datatype(),
|
||||
$struct::String(_) => ConcreteDataType::string_datatype(),
|
||||
$struct::Binary(_) => ConcreteDataType::binary_datatype(),
|
||||
$struct::Date(_) => ConcreteDataType::date_datatype(),
|
||||
$struct::DateTime(_) => ConcreteDataType::datetime_datatype(),
|
||||
$struct::Time(t) => ConcreteDataType::time_datatype(*t.unit()),
|
||||
$struct::Timestamp(v) => ConcreteDataType::timestamp_datatype(v.unit()),
|
||||
$struct::Interval(v) => ConcreteDataType::interval_datatype(v.unit()),
|
||||
$struct::List(list) => ConcreteDataType::list_datatype(list.datatype().clone()),
|
||||
$struct::Duration(d) => ConcreteDataType::duration_datatype(d.unit()),
|
||||
$struct::Decimal128(d) => {
|
||||
ConcreteDataType::decimal128_datatype(d.precision(), d.scale())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl Value {
|
||||
define_data_type_func!(Value);
|
||||
|
||||
/// Returns true if this is a null value.
|
||||
pub fn is_null(&self) -> bool {
|
||||
@@ -250,6 +258,17 @@ impl Value {
|
||||
}
|
||||
}
|
||||
|
||||
/// Cast Value to u64. Return None if value is not a valid uint64 data type.
|
||||
pub fn as_u64(&self) -> Option<u64> {
|
||||
match self {
|
||||
Value::UInt8(v) => Some(*v as _),
|
||||
Value::UInt16(v) => Some(*v as _),
|
||||
Value::UInt32(v) => Some(*v as _),
|
||||
Value::UInt64(v) => Some(*v),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the logical type of the value.
|
||||
pub fn logical_type_id(&self) -> LogicalTypeId {
|
||||
match self {
|
||||
@@ -351,6 +370,36 @@ impl Value {
|
||||
}
|
||||
}
|
||||
|
||||
pub trait TryAsPrimitive<T: LogicalPrimitiveType> {
|
||||
fn try_as_primitive(&self) -> Option<T::Native>;
|
||||
}
|
||||
|
||||
macro_rules! impl_try_as_primitive {
|
||||
($Type: ident, $Variant: ident) => {
|
||||
impl TryAsPrimitive<crate::types::$Type> for Value {
|
||||
fn try_as_primitive(
|
||||
&self,
|
||||
) -> Option<<crate::types::$Type as crate::types::LogicalPrimitiveType>::Native> {
|
||||
match self {
|
||||
Value::$Variant(v) => Some((*v).into()),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl_try_as_primitive!(Int8Type, Int8);
|
||||
impl_try_as_primitive!(Int16Type, Int16);
|
||||
impl_try_as_primitive!(Int32Type, Int32);
|
||||
impl_try_as_primitive!(Int64Type, Int64);
|
||||
impl_try_as_primitive!(UInt8Type, UInt8);
|
||||
impl_try_as_primitive!(UInt16Type, UInt16);
|
||||
impl_try_as_primitive!(UInt32Type, UInt32);
|
||||
impl_try_as_primitive!(UInt64Type, UInt64);
|
||||
impl_try_as_primitive!(Float32Type, Float32);
|
||||
impl_try_as_primitive!(Float64Type, Float64);
|
||||
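A short sketch of how the generated impls are used; the import paths are assumptions made for illustration (the logical types live in the datatypes crate alongside this file):

use datatypes::types::Int64Type;
use datatypes::value::{TryAsPrimitive, Value};

fn main() {
    let v = Value::Int64(42);
    // Returns the native value only when the variant matches the logical type.
    assert_eq!(TryAsPrimitive::<Int64Type>::try_as_primitive(&v), Some(42));
    assert_eq!(TryAsPrimitive::<Int64Type>::try_as_primitive(&Value::Null), None);
}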
|
||||
pub fn to_null_scalar_value(output_type: &ConcreteDataType) -> Result<ScalarValue> {
|
||||
Ok(match output_type {
|
||||
ConcreteDataType::Null(_) => ScalarValue::Null,
|
||||
@@ -938,6 +987,8 @@ macro_rules! impl_as_for_value_ref {
|
||||
}
|
||||
|
||||
impl<'a> ValueRef<'a> {
|
||||
define_data_type_func!(ValueRef);
|
||||
|
||||
/// Returns true if this is null.
|
||||
pub fn is_null(&self) -> bool {
|
||||
matches!(self, ValueRef::Null)
|
||||
@@ -1143,6 +1194,14 @@ impl<'a> ListValueRef<'a> {
|
||||
ListValueRef::Ref { val } => Value::List(val.clone()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the inner element's data type.
|
||||
fn datatype(&self) -> ConcreteDataType {
|
||||
match self {
|
||||
ListValueRef::Indexed { vector, .. } => vector.data_type(),
|
||||
ListValueRef::Ref { val } => val.datatype().clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> PartialEq for ListValueRef<'a> {
|
||||
@@ -2358,4 +2417,12 @@ mod tests {
|
||||
);
|
||||
check_value_ref_size_eq(&ValueRef::Decimal128(Decimal128::new(1234, 3, 1)), 32)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_incorrect_default_value_issue_3479() {
|
||||
let value = OrderedF64::from(0.047318541668048164);
|
||||
let serialized = serde_json::to_string(&value).unwrap();
|
||||
let deserialized: OrderedF64 = serde_json::from_str(&serialized).unwrap();
|
||||
assert_eq!(value, deserialized);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,8 +22,9 @@ use std::task::{Context, Poll};
|
||||
use common_datasource::object_store::build_backend;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::prelude::Expr;
|
||||
use common_recordbatch::adapter::RecordBatchMetrics;
|
||||
use common_recordbatch::error::{CastVectorSnafu, ExternalSnafu, Result as RecordBatchResult};
|
||||
use common_recordbatch::{RecordBatch, RecordBatchStream, SendableRecordBatchStream};
|
||||
use common_recordbatch::{OrderOption, RecordBatch, RecordBatchStream, SendableRecordBatchStream};
|
||||
use datafusion::logical_expr::utils as df_logical_expr_utils;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
@@ -151,6 +152,14 @@ impl RecordBatchStream for FileToScanRegionStream {
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.scan_schema.clone()
|
||||
}
|
||||
|
||||
fn output_ordering(&self) -> Option<&[OrderOption]> {
|
||||
None
|
||||
}
|
||||
|
||||
fn metrics(&self) -> Option<RecordBatchMetrics> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for FileToScanRegionStream {
|
||||
|
||||
@@ -18,6 +18,7 @@ common-query.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
common-time.workspace = true
|
||||
datatypes.workspace = true
|
||||
enum_dispatch = "0.3"
|
||||
hydroflow = "0.5.0"
|
||||
itertools.workspace = true
|
||||
num-traits = "0.2"
|
||||
@@ -27,3 +28,6 @@ session.workspace = true
|
||||
snafu.workspace = true
|
||||
tokio.workspace = true
|
||||
tonic.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
serde_json = "1.0"
|
||||
|
||||
@@ -24,5 +24,6 @@ mod scalar;
|
||||
pub(crate) use error::{EvalError, InvalidArgumentSnafu, OptimizeSnafu};
|
||||
pub(crate) use func::{BinaryFunc, UnaryFunc, UnmaterializableFunc, VariadicFunc};
|
||||
pub(crate) use id::{GlobalId, Id, LocalId};
|
||||
pub(crate) use linear::{MapFilterProject, MfpPlan, SafeMfpPlan};
|
||||
pub(crate) use relation::{AggregateExpr, AggregateFunc};
|
||||
pub(crate) use scalar::ScalarExpr;
|
||||
|
||||
@@ -61,4 +61,7 @@ pub enum EvalError {
|
||||
|
||||
#[snafu(display("Unsupported temporal filter: {reason}"))]
|
||||
UnsupportedTemporalFilter { reason: String, location: Location },
|
||||
|
||||
#[snafu(display("Overflowed during evaluation"))]
|
||||
Overflow { location: Location },
|
||||
}
|
||||
|
||||
@@ -16,18 +16,28 @@ use std::collections::{BTreeMap, BTreeSet};
|
||||
|
||||
use datatypes::value::Value;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, OptionExt};
|
||||
|
||||
use crate::expr::error::EvalError;
|
||||
use crate::expr::{Id, LocalId, ScalarExpr};
|
||||
use crate::expr::{Id, InvalidArgumentSnafu, LocalId, ScalarExpr};
|
||||
use crate::repr::{self, value_to_internal_ts, Diff, Row};
|
||||
|
||||
/// A compound operator that can be applied row-by-row.
|
||||
///
|
||||
/// In practice, this operator is a sequence of map, filter, and project in arbitrary order,
|
||||
/// which can be, and is, stored by reordering the sequence
/// into a `map` first, `filter` second, and `project` third order.
|
||||
///
|
||||
/// The input is a row (a sequence of values), which is also used to store intermediate results:
/// the `map` operator appends new columns to the row according to its expressions,
/// the `filter` operator decides whether the entire row can be output at all by checking whether the row satisfies the predicates,
/// and the `project` operator decides which columns of the row should be output.
|
||||
///
|
||||
/// This operator integrates the map, filter, and project operators.
|
||||
/// It applies a sequences of map expressions, which are allowed to
|
||||
/// refer to previous expressions, interleaved with predicates which
|
||||
/// must be satisfied for an output to be produced. If all predicates
|
||||
/// evaluate to `Datum::True` the data at the identified columns are
|
||||
/// evaluate to `Value::Boolean(True)` the data at the identified columns are
|
||||
/// collected and produced as output in a packed `Row`.
|
||||
///
|
||||
/// This operator is a "builder" and its contents may contain expressions
|
||||
@@ -35,7 +45,7 @@ use crate::repr::{self, value_to_internal_ts, Diff, Row};
|
||||
/// expressions in `self.expressions`, even though this is not something
|
||||
/// we can directly evaluate. The plan creation methods will defensively
|
||||
/// ensure that the right thing happens.
|
||||
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
|
||||
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)]
|
||||
pub struct MapFilterProject {
|
||||
/// A sequence of expressions that should be appended to the row.
|
||||
///
|
||||
@@ -48,8 +58,10 @@ pub struct MapFilterProject {
|
||||
/// Each entry is prepended with a column identifier indicating
|
||||
/// the column *before* which the predicate should first be applied.
|
||||
/// Most commonly this would be one plus the largest column identifier
|
||||
/// in the predicate's support, but it could be larger to implement
|
||||
/// in the predicate's referred columns, but it could be larger to implement
|
||||
/// guarded evaluation of predicates.
|
||||
/// Put it in another word, the first element of the tuple means
|
||||
/// the predicates can't be evaluated until that number of columns is formed.
|
||||
///
|
||||
/// This list should be sorted by the first field.
|
||||
pub predicates: Vec<(usize, ScalarExpr)>,
|
||||
@@ -62,12 +74,447 @@ pub struct MapFilterProject {
|
||||
pub input_arity: usize,
|
||||
}
|
||||
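To make the three fields concrete, here is a small pipeline in the spirit of the tests further below: on a 3-column input, append col0 + col1, keep only that sum, and filter on it (unwrap used for brevity):

let mfp = MapFilterProject::new(3)
    // map: append col0 + col1 as a new column (index 3)
    .map(vec![
        ScalarExpr::Column(0).call_binary(ScalarExpr::Column(1), BinaryFunc::AddInt32)
    ])
    .unwrap()
    // project: output only the appended sum
    .project(vec![3])
    .unwrap()
    // filter: keep rows whose projected sum is greater than 10
    .filter(vec![ScalarExpr::Column(0).call_binary(
        ScalarExpr::Literal(Value::from(10i32), ConcreteDataType::int32_datatype()),
        BinaryFunc::Gt,
    )])
    .unwrap();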
|
||||
impl MapFilterProject {
|
||||
/// Create a no-op operator for an input of a supplied arity.
|
||||
pub fn new(input_arity: usize) -> Self {
|
||||
Self {
|
||||
expressions: Vec::new(),
|
||||
predicates: Vec::new(),
|
||||
projection: (0..input_arity).collect(),
|
||||
input_arity,
|
||||
}
|
||||
}
|
||||
|
||||
/// Given two mfps, return an mfp that applies one
|
||||
/// followed by the other.
|
||||
/// Note that the arguments are in the opposite order
|
||||
/// from how function composition is usually written in mathematics.
|
||||
pub fn compose(before: Self, after: Self) -> Result<Self, EvalError> {
|
||||
let (m, f, p) = after.into_map_filter_project();
|
||||
before.map(m)?.filter(f)?.project(p)
|
||||
}
|
||||
|
||||
/// True if the operator describes the identity transformation.
|
||||
pub fn is_identity(&self) -> bool {
|
||||
self.expressions.is_empty()
|
||||
&& self.predicates.is_empty()
|
||||
// identity if projection is the identity permutation
|
||||
&& self.projection.len() == self.input_arity
|
||||
&& self.projection.iter().enumerate().all(|(i, p)| i == *p)
|
||||
}
|
||||
|
||||
/// Retain only the indicated columns in the presented order.
|
||||
///
|
||||
/// i.e. before: `self.projection = [1, 2, 0], columns = [1, 0]`
|
||||
/// ```mermaid
|
||||
/// flowchart TD
|
||||
/// col-0
|
||||
/// col-1
|
||||
/// col-2
|
||||
/// projection --> |0|col-1
|
||||
/// projection --> |1|col-2
|
||||
/// projection --> |2|col-0
|
||||
/// ```
|
||||
///
|
||||
/// after: `self.projection = [2, 1]`
|
||||
/// ```mermaid
|
||||
/// flowchart TD
|
||||
/// col-0
|
||||
/// col-1
|
||||
/// col-2
|
||||
/// project("project:[1,2,0]")
|
||||
/// project
|
||||
/// project -->|0| col-1
|
||||
/// project -->|1| col-2
|
||||
/// project -->|2| col-0
|
||||
/// new_project("apply new project:[1,0]")
|
||||
/// new_project -->|0| col-2
|
||||
/// new_project -->|1| col-1
|
||||
/// ```
|
||||
pub fn project<I>(mut self, columns: I) -> Result<Self, EvalError>
|
||||
where
|
||||
I: IntoIterator<Item = usize> + std::fmt::Debug,
|
||||
{
|
||||
self.projection = columns
|
||||
.into_iter()
|
||||
.map(|c| self.projection.get(c).cloned().ok_or(c))
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(|c| {
|
||||
InvalidArgumentSnafu {
|
||||
reason: format!(
|
||||
"column index {} out of range, expected at most {} columns",
|
||||
c,
|
||||
self.projection.len()
|
||||
),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Retain only rows satisfying these predicates.
|
||||
///
|
||||
/// This method introduces predicates as eagerly as they can be evaluated,
|
||||
/// which may not be desired for predicates that may cause exceptions.
|
||||
/// If fine manipulation is required, the predicates can be added manually.
|
||||
///
|
||||
/// simply added to the end of the predicates list
|
||||
///
|
||||
/// while paying attention to column references maintained by `self.projection`
|
||||
///
|
||||
/// so `self.projection = [1, 2, 0], filter = [0]+[1]>0`:
|
||||
/// becomes:
|
||||
/// ```mermaid
|
||||
/// flowchart TD
|
||||
/// col-0
|
||||
/// col-1
|
||||
/// col-2
|
||||
/// project("first project:[1,2,0]")
|
||||
/// project
|
||||
/// project -->|0| col-1
|
||||
/// project -->|1| col-2
|
||||
/// project -->|2| col-0
|
||||
/// filter("then filter:[0]+[1]>0")
|
||||
/// filter -->|0| col-1
|
||||
/// filter --> |1| col-2
|
||||
/// ```
|
||||
pub fn filter<I>(mut self, predicates: I) -> Result<Self, EvalError>
|
||||
where
|
||||
I: IntoIterator<Item = ScalarExpr>,
|
||||
{
|
||||
for mut predicate in predicates {
|
||||
// Correct column references.
|
||||
predicate.permute(&self.projection[..])?;
|
||||
|
||||
// Validate column references.
|
||||
let referred_columns = predicate.get_all_ref_columns();
|
||||
for c in referred_columns.iter() {
|
||||
// current row len includes input columns and the number of previous expressions
|
||||
let cur_row_len = self.input_arity + self.expressions.len();
|
||||
ensure!(
|
||||
*c < cur_row_len,
|
||||
InvalidArgumentSnafu {
|
||||
reason: format!(
|
||||
"column index {} out of range, expected at most {} columns",
|
||||
c, cur_row_len
|
||||
)
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
// Insert predicate as eagerly as it can be evaluated:
|
||||
// just after the largest column in its support is formed.
|
||||
let max_support = referred_columns
|
||||
.into_iter()
|
||||
.max()
|
||||
.map(|c| c + 1)
|
||||
.unwrap_or(0);
|
||||
self.predicates.push((max_support, predicate))
|
||||
}
|
||||
// Stable sort predicates by position at which they take effect.
|
||||
self.predicates
|
||||
.sort_by_key(|(position, _predicate)| *position);
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Append the result of evaluating expressions to each row.
|
||||
///
|
||||
/// simply append `expressions` to `self.expressions`
|
||||
///
|
||||
/// while paying attention to column references maintained by `self.projection`
|
||||
///
|
||||
/// hence, applying `map` on top of a previously non-trivial projection looks like this:
|
||||
/// before:
|
||||
/// ```mermaid
|
||||
/// flowchart TD
|
||||
/// col-0
|
||||
/// col-1
|
||||
/// col-2
|
||||
/// projection --> |0|col-1
|
||||
/// projection --> |1|col-2
|
||||
/// projection --> |2|col-0
|
||||
/// ```
|
||||
/// after apply map:
|
||||
/// ```mermaid
|
||||
/// flowchart TD
|
||||
/// col-0
|
||||
/// col-1
|
||||
/// col-2
|
||||
/// project("project:[1,2,0]")
|
||||
/// project
|
||||
/// project -->|0| col-1
|
||||
/// project -->|1| col-2
|
||||
/// project -->|2| col-0
|
||||
/// map("map:[0]/[1]/[2]")
|
||||
/// map -->|0|col-1
|
||||
/// map -->|1|col-2
|
||||
/// map -->|2|col-0
|
||||
/// ```
|
||||
pub fn map<I>(mut self, expressions: I) -> Result<Self, EvalError>
|
||||
where
|
||||
I: IntoIterator<Item = ScalarExpr>,
|
||||
{
|
||||
for mut expression in expressions {
|
||||
// Correct column references.
|
||||
expression.permute(&self.projection[..])?;
|
||||
|
||||
// Validate column references.
|
||||
for c in expression.get_all_ref_columns().into_iter() {
|
||||
// current row len includes input columns and the number of previous expressions
|
||||
let current_row_len = self.input_arity + self.expressions.len();
|
||||
ensure!(
|
||||
c < current_row_len,
|
||||
InvalidArgumentSnafu {
|
||||
reason: format!(
|
||||
"column index {} out of range, expected at most {} columns",
|
||||
c, current_row_len
|
||||
)
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
// Introduce expression and produce as output.
|
||||
self.expressions.push(expression);
|
||||
// Expression by default is projected to output.
|
||||
let cur_expr_col_num = self.input_arity + self.expressions.len() - 1;
|
||||
self.projection.push(cur_expr_col_num);
|
||||
}
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Like [`MapFilterProject::as_map_filter_project`], but consumes `self` rather than cloning.
|
||||
pub fn into_map_filter_project(self) -> (Vec<ScalarExpr>, Vec<ScalarExpr>, Vec<usize>) {
|
||||
let predicates = self
|
||||
.predicates
|
||||
.into_iter()
|
||||
.map(|(_pos, predicate)| predicate)
|
||||
.collect();
|
||||
(self.expressions, predicates, self.projection)
|
||||
}
|
||||
|
||||
/// As the arguments to `Map`, `Filter`, and `Project` operators.
|
||||
///
|
||||
/// In principle, this operator can be implemented as a sequence of
|
||||
/// more elemental operators, likely less efficiently.
|
||||
pub fn as_map_filter_project(&self) -> (Vec<ScalarExpr>, Vec<ScalarExpr>, Vec<usize>) {
|
||||
self.clone().into_map_filter_project()
|
||||
}
|
||||
}
|
||||
|
||||
impl MapFilterProject {
|
||||
pub fn optimize(&mut self) {
|
||||
// TODO(discord9): optimize
|
||||
}
|
||||
|
||||
/// Convert the `MapFilterProject` into a staged evaluation plan.
|
||||
///
|
||||
/// The main behavior is extract temporal predicates, which cannot be evaluated
|
||||
/// using the standard machinery.
|
||||
pub fn into_plan(self) -> Result<MfpPlan, EvalError> {
|
||||
MfpPlan::create_from(self)
|
||||
}
|
||||
|
||||
/// Lists input columns whose values are used in outputs.
|
||||
///
|
||||
/// It is entirely appropriate to determine the demand of an instance
|
||||
/// and then both apply a projection to the subject of the instance and
|
||||
/// `self.permute` this instance.
|
||||
pub fn demand(&self) -> BTreeSet<usize> {
|
||||
let mut demanded = BTreeSet::new();
|
||||
// first, get all columns referenced by predicates
|
||||
for (_index, pred) in self.predicates.iter() {
|
||||
demanded.extend(pred.get_all_ref_columns());
|
||||
}
|
||||
// then, get columns referenced by projection which is direct output
|
||||
demanded.extend(self.projection.iter().cloned());
|
||||
|
||||
// check every expression: if an expression is contained in demanded, then all columns it references should be added to demanded
|
||||
for index in (0..self.expressions.len()).rev() {
|
||||
if demanded.contains(&(self.input_arity + index)) {
|
||||
demanded.extend(self.expressions[index].get_all_ref_columns());
|
||||
}
|
||||
}
|
||||
|
||||
// only keep demanded columns that are in input
|
||||
demanded.retain(|col| col < &self.input_arity);
|
||||
demanded
|
||||
}
|
||||
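A quick sketch of what demand reports, using the same construction style as the tests below: the projected expression pulls in the input columns it references, and untouched columns are dropped:

let mfp = MapFilterProject::new(3)
    .map(vec![
        ScalarExpr::Column(0).call_binary(ScalarExpr::Column(1), BinaryFunc::Lt)
    ])
    .unwrap()
    .project(vec![3])
    .unwrap();
// Column 2 never feeds a predicate, an expression reachable from the projection,
// or the projection itself, so only columns 0 and 1 are demanded.
assert_eq!(mfp.demand(), BTreeSet::from([0, 1]));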
|
||||
/// Update input column references, due to an input projection or permutation.
|
||||
///
|
||||
/// The `shuffle` argument remaps expected column identifiers to new locations,
|
||||
/// with the expectation that `shuffle` describes all input columns, and so the
|
||||
/// intermediate results will be able to start at position `shuffle.len()`.
|
||||
///
|
||||
/// The supplied `shuffle` may not list columns that are not "demanded" by the
|
||||
/// instance, and so we should ensure that `self` is optimized to not reference
|
||||
/// columns that are not demanded.
|
||||
pub fn permute(
|
||||
&mut self,
|
||||
mut shuffle: BTreeMap<usize, usize>,
|
||||
new_input_arity: usize,
|
||||
) -> Result<(), EvalError> {
|
||||
// check shuffle is valid
|
||||
let demand = self.demand();
|
||||
for d in demand {
|
||||
ensure!(
|
||||
shuffle.contains_key(&d),
|
||||
InvalidArgumentSnafu {
|
||||
reason: format!(
|
||||
"Demanded column {} is not in shuffle's keys: {:?}",
|
||||
d,
|
||||
shuffle.keys()
|
||||
)
|
||||
}
|
||||
);
|
||||
}
|
||||
ensure!(
|
||||
shuffle.len() <= new_input_arity,
|
||||
InvalidArgumentSnafu {
|
||||
reason: format!(
|
||||
"shuffle's length {} is greater than new_input_arity {}",
|
||||
shuffle.len(),
|
||||
new_input_arity
|
||||
)
|
||||
}
|
||||
);
|
||||
|
||||
// decompose self into map, filter, project for ease of manipulation
|
||||
let (mut map, mut filter, mut project) = self.as_map_filter_project();
|
||||
for index in 0..map.len() {
|
||||
// Intermediate columns are just shifted.
|
||||
shuffle.insert(self.input_arity + index, new_input_arity + index);
|
||||
}
|
||||
|
||||
for expr in map.iter_mut() {
|
||||
expr.permute_map(&shuffle)?;
|
||||
}
|
||||
for pred in filter.iter_mut() {
|
||||
pred.permute_map(&shuffle)?;
|
||||
}
|
||||
let new_row_len = new_input_arity + map.len();
|
||||
for proj in project.iter_mut() {
|
||||
ensure!(
|
||||
shuffle[proj] < new_row_len,
|
||||
InvalidArgumentSnafu {
|
||||
reason: format!(
|
||||
"shuffled column index {} out of range, expected at most {} columns",
|
||||
shuffle[proj], new_row_len
|
||||
)
|
||||
}
|
||||
);
|
||||
*proj = shuffle[proj];
|
||||
}
|
||||
*self = Self::new(new_input_arity)
|
||||
.map(map)?
|
||||
.filter(filter)?
|
||||
.project(project)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// A wrapper type which indicates it is safe to simply evaluate all expressions.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, Ord, PartialOrd)]
|
||||
pub struct SafeMfpPlan {
|
||||
pub(crate) mfp: MapFilterProject,
|
||||
}
|
||||
|
||||
impl SafeMfpPlan {
|
||||
/// See [`MapFilterProject::permute`].
|
||||
pub fn permute(
|
||||
&mut self,
|
||||
map: BTreeMap<usize, usize>,
|
||||
new_arity: usize,
|
||||
) -> Result<(), EvalError> {
|
||||
self.mfp.permute(map, new_arity)
|
||||
}
|
||||
|
||||
/// Evaluates the linear operator on a supplied list of datums.
|
||||
///
|
||||
/// The arguments are the initial datums associated with the row,
|
||||
/// and an appropriately lifetimed arena for temporary allocations
|
||||
/// needed by scalar evaluation.
|
||||
///
|
||||
/// An `Ok` result will either be `None` if any predicate did not
|
||||
/// evaluate to `Value::Boolean(true)`, or the values of the columns listed
|
||||
/// by `self.projection` if all predicates passed. If an error
|
||||
/// occurs in the evaluation it is returned as an `Err` variant.
|
||||
/// As the evaluation exits early with failed predicates, it may
|
||||
/// miss some errors that would occur later in evaluation.
|
||||
///
|
||||
/// The `row` is not cleared first, but emptied if the function
|
||||
/// returns `Ok(Some(row))`.
|
||||
#[inline(always)]
|
||||
pub fn evaluate_into(
|
||||
&self,
|
||||
values: &mut Vec<Value>,
|
||||
row_buf: &mut Row,
|
||||
) -> Result<Option<Row>, EvalError> {
|
||||
ensure!(
|
||||
values.len() == self.mfp.input_arity,
|
||||
InvalidArgumentSnafu {
|
||||
reason: format!(
|
||||
"values length {} is not equal to input_arity {}",
|
||||
values.len(),
|
||||
self.mfp.input_arity
|
||||
),
|
||||
}
|
||||
);
|
||||
let passed_predicates = self.evaluate_inner(values)?;
|
||||
|
||||
if !passed_predicates {
|
||||
Ok(None)
|
||||
} else {
|
||||
row_buf.clear();
|
||||
row_buf.extend(self.mfp.projection.iter().map(|c| values[*c].clone()));
|
||||
Ok(Some(row_buf.clone()))
|
||||
}
|
||||
}
|
||||
|
||||
/// A version of `evaluate` which produces an iterator over `Datum`
|
||||
/// as output.
|
||||
///
|
||||
/// This version can be useful when one wants to capture the resulting
|
||||
/// datums without packing and then unpacking a row.
|
||||
#[inline(always)]
|
||||
pub fn evaluate_iter<'a>(
|
||||
&'a self,
|
||||
datums: &'a mut Vec<Value>,
|
||||
) -> Result<Option<impl Iterator<Item = Value> + 'a>, EvalError> {
|
||||
let passed_predicates = self.evaluate_inner(datums)?;
|
||||
if !passed_predicates {
|
||||
Ok(None)
|
||||
} else {
|
||||
Ok(Some(
|
||||
self.mfp.projection.iter().map(move |i| datums[*i].clone()),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Populates `values` with `self.expressions` and tests `self.predicates`.
|
||||
///
|
||||
/// This does not apply `self.projection`, which is up to the calling method.
|
||||
pub fn evaluate_inner(&self, values: &mut Vec<Value>) -> Result<bool, EvalError> {
|
||||
let mut expression = 0;
|
||||
for (support, predicate) in self.mfp.predicates.iter() {
|
||||
while self.mfp.input_arity + expression < *support {
|
||||
values.push(self.mfp.expressions[expression].eval(&values[..])?);
|
||||
expression += 1;
|
||||
}
|
||||
if predicate.eval(&values[..])? != Value::Boolean(true) {
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
// while evaluated expressions are less than total expressions, keep evaluating
|
||||
while expression < self.mfp.expressions.len() {
|
||||
values.push(self.mfp.expressions[expression].eval(&values[..])?);
|
||||
expression += 1;
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::ops::Deref for SafeMfpPlan {
|
||||
type Target = MapFilterProject;
|
||||
fn deref(&self) -> &Self::Target {
|
||||
@@ -94,3 +541,383 @@ pub struct MfpPlan {
|
||||
/// Expressions that when evaluated upper-bound `MzNow`.
|
||||
pub(crate) upper_bounds: Vec<ScalarExpr>,
|
||||
}
|
||||
|
||||
impl MfpPlan {
|
||||
/// Finds `now` in `predicates` and moves such predicates into lower/upper temporal bounds for the temporal filter to use.
|
||||
pub fn create_from(mut mfp: MapFilterProject) -> Result<Self, EvalError> {
|
||||
let mut lower_bounds = Vec::new();
|
||||
let mut upper_bounds = Vec::new();
|
||||
|
||||
let mut temporal = Vec::new();
|
||||
|
||||
// Optimize, to ensure that temporal predicates are moved into `mfp.predicates`.
|
||||
mfp.optimize();
|
||||
|
||||
mfp.predicates.retain(|(_position, predicate)| {
|
||||
if predicate.contains_temporal() {
|
||||
temporal.push(predicate.clone());
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
});
|
||||
|
||||
for predicate in temporal {
|
||||
let (lower, upper) = predicate.extract_bound()?;
|
||||
lower_bounds.extend(lower);
|
||||
upper_bounds.extend(upper);
|
||||
}
|
||||
Ok(Self {
|
||||
mfp: SafeMfpPlan { mfp },
|
||||
lower_bounds,
|
||||
upper_bounds,
|
||||
})
|
||||
}
|
||||
|
||||
/// Indicates if the planned `MapFilterProject` emits exactly its inputs as outputs.
|
||||
pub fn is_identity(&self) -> bool {
|
||||
self.mfp.mfp.is_identity() && self.lower_bounds.is_empty() && self.upper_bounds.is_empty()
|
||||
}
|
||||
|
||||
/// if `lower_bound <= sys_time < upper_bound`, return `[(data, sys_time, +1), (data, min_upper_bound, -1)]`
|
||||
///
|
||||
/// else if `sys_time < lower_bound`, return `[(data, lower_bound, +1), (data, min_upper_bound, -1)]`
|
||||
///
|
||||
/// else if `sys_time >= upper_bound`, return `[None, None]`
|
||||
///
|
||||
/// if an eval error occurs in any of those processes, the corresponding result will be `Err`
|
||||
pub fn evaluate<E: From<EvalError>>(
|
||||
&self,
|
||||
values: &mut Vec<Value>,
|
||||
sys_time: repr::Timestamp,
|
||||
diff: Diff,
|
||||
) -> impl Iterator<Item = Result<(Row, repr::Timestamp, Diff), (E, repr::Timestamp, Diff)>>
|
||||
{
|
||||
match self.mfp.evaluate_inner(values) {
|
||||
Err(e) => {
|
||||
return Some(Err((e.into(), sys_time, diff)))
|
||||
.into_iter()
|
||||
.chain(None);
|
||||
}
|
||||
Ok(true) => {}
|
||||
Ok(false) => {
|
||||
return None.into_iter().chain(None);
|
||||
}
|
||||
}
|
||||
|
||||
let mut lower_bound = sys_time;
|
||||
let mut upper_bound = None;
|
||||
|
||||
// Track whether we have seen a null in either bound, as this should
|
||||
// prevent the record from being produced at any time.
|
||||
let mut null_eval = false;
|
||||
let ret_err = |e: EvalError| {
|
||||
Some(Err((e.into(), sys_time, diff)))
|
||||
.into_iter()
|
||||
.chain(None)
|
||||
};
|
||||
for l in self.lower_bounds.iter() {
|
||||
match l.eval(values) {
|
||||
Ok(v) => {
|
||||
if v.is_null() {
|
||||
null_eval = true;
|
||||
continue;
|
||||
}
|
||||
match value_to_internal_ts(v) {
|
||||
Ok(ts) => lower_bound = lower_bound.max(ts),
|
||||
Err(e) => return ret_err(e),
|
||||
}
|
||||
}
|
||||
Err(e) => return ret_err(e),
|
||||
};
|
||||
}
|
||||
|
||||
for u in self.upper_bounds.iter() {
|
||||
if upper_bound != Some(lower_bound) {
|
||||
match u.eval(values) {
|
||||
Err(e) => return ret_err(e),
|
||||
Ok(val) => {
|
||||
if val.is_null() {
|
||||
null_eval = true;
|
||||
continue;
|
||||
}
|
||||
let ts = match value_to_internal_ts(val) {
|
||||
Ok(ts) => ts,
|
||||
Err(e) => return ret_err(e),
|
||||
};
|
||||
if let Some(upper) = upper_bound {
|
||||
upper_bound = Some(upper.min(ts));
|
||||
} else {
|
||||
upper_bound = Some(ts);
|
||||
}
|
||||
// Force the upper bound to be at least the lower
|
||||
// bound.
|
||||
if upper_bound.is_some() && upper_bound < Some(lower_bound) {
|
||||
upper_bound = Some(lower_bound);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if Some(lower_bound) != upper_bound && !null_eval {
|
||||
let res_row = Row::pack(self.mfp.mfp.projection.iter().map(|c| values[*c].clone()));
|
||||
let upper_opt =
|
||||
upper_bound.map(|upper_bound| Ok((res_row.clone(), upper_bound, -diff)));
|
||||
// if diff==-1, the `upper_opt` will cancel the future `-1` inserted before by previous diff==1 row
|
||||
let lower = Some(Ok((res_row, lower_bound, diff)));
|
||||
|
||||
lower.into_iter().chain(upper_opt)
|
||||
} else {
|
||||
None.into_iter().chain(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use itertools::Itertools;
|
||||
|
||||
use super::*;
|
||||
use crate::expr::{BinaryFunc, UnaryFunc, UnmaterializableFunc};
|
||||
#[test]
|
||||
fn test_mfp_with_time() {
|
||||
use crate::expr::func::BinaryFunc;
|
||||
let lte_now = ScalarExpr::Column(0).call_binary(
|
||||
ScalarExpr::CallUnmaterializable(UnmaterializableFunc::Now),
|
||||
BinaryFunc::Lte,
|
||||
);
|
||||
assert!(lte_now.contains_temporal());
|
||||
|
||||
let gt_now_minus_two = ScalarExpr::Column(0)
|
||||
.call_binary(
|
||||
ScalarExpr::Literal(Value::from(2i64), ConcreteDataType::int64_datatype()),
|
||||
BinaryFunc::AddInt64,
|
||||
)
|
||||
.call_binary(
|
||||
ScalarExpr::CallUnmaterializable(UnmaterializableFunc::Now),
|
||||
BinaryFunc::Gt,
|
||||
);
|
||||
assert!(gt_now_minus_two.contains_temporal());
|
||||
|
||||
let mfp = MapFilterProject::new(3)
|
||||
.filter(vec![
|
||||
// col(0) <= now()
|
||||
lte_now,
|
||||
// col(0) + 2 > now()
|
||||
gt_now_minus_two,
|
||||
])
|
||||
.unwrap()
|
||||
.project(vec![0])
|
||||
.unwrap();
|
||||
|
||||
let mfp = MfpPlan::create_from(mfp).unwrap();
|
||||
let expected = vec![
|
||||
(
|
||||
0,
|
||||
vec![
|
||||
(Row::new(vec![Value::from(4i64)]), 4, 1),
|
||||
(Row::new(vec![Value::from(4i64)]), 6, -1),
|
||||
],
|
||||
),
|
||||
(
|
||||
5,
|
||||
vec![
|
||||
(Row::new(vec![Value::from(4i64)]), 5, 1),
|
||||
(Row::new(vec![Value::from(4i64)]), 6, -1),
|
||||
],
|
||||
),
|
||||
(10, vec![]),
|
||||
];
|
||||
for (sys_time, expected) in expected {
|
||||
let mut values = vec![Value::from(4i64), Value::from(2i64), Value::from(3i64)];
|
||||
let ret = mfp
|
||||
.evaluate::<EvalError>(&mut values, sys_time, 1)
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.unwrap();
|
||||
assert_eq!(ret, expected);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_mfp() {
|
||||
use crate::expr::func::BinaryFunc;
|
||||
let mfp = MapFilterProject::new(3)
|
||||
.map(vec![
|
||||
ScalarExpr::Column(0).call_binary(ScalarExpr::Column(1), BinaryFunc::Lt),
|
||||
ScalarExpr::Column(1).call_binary(ScalarExpr::Column(2), BinaryFunc::Lt),
|
||||
])
|
||||
.unwrap()
|
||||
.project(vec![3, 4])
|
||||
.unwrap();
|
||||
assert!(!mfp.is_identity());
|
||||
let mfp = MapFilterProject::compose(mfp, MapFilterProject::new(2)).unwrap();
|
||||
{
|
||||
let mfp_0 = mfp.as_map_filter_project();
|
||||
let same = MapFilterProject::new(3)
|
||||
.map(mfp_0.0)
|
||||
.unwrap()
|
||||
.filter(mfp_0.1)
|
||||
.unwrap()
|
||||
.project(mfp_0.2)
|
||||
.unwrap();
|
||||
assert_eq!(mfp, same);
|
||||
}
|
||||
assert_eq!(mfp.demand().len(), 3);
|
||||
let mut mfp = mfp;
|
||||
mfp.permute(BTreeMap::from([(0, 2), (2, 0), (1, 1)]), 3)
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
mfp,
|
||||
MapFilterProject::new(3)
|
||||
.map(vec![
|
||||
ScalarExpr::Column(2).call_binary(ScalarExpr::Column(1), BinaryFunc::Lt),
|
||||
ScalarExpr::Column(1).call_binary(ScalarExpr::Column(0), BinaryFunc::Lt),
|
||||
])
|
||||
.unwrap()
|
||||
.project(vec![3, 4])
|
||||
.unwrap()
|
||||
);
|
||||
let safe_mfp = SafeMfpPlan { mfp };
|
||||
let mut values = vec![Value::from(4), Value::from(2), Value::from(3)];
|
||||
let ret = safe_mfp
|
||||
.evaluate_into(&mut values, &mut Row::empty())
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert_eq!(ret, Row::pack(vec![Value::from(false), Value::from(true)]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn manipulation_mfp() {
|
||||
// give an input of 4 columns
|
||||
let mfp = MapFilterProject::new(4);
|
||||
// append an expression to the mfp's input row that computes the sum of the first 3 columns
|
||||
let mfp = mfp
|
||||
.map(vec![ScalarExpr::Column(0)
|
||||
.call_binary(ScalarExpr::Column(1), BinaryFunc::AddInt32)
|
||||
.call_binary(ScalarExpr::Column(2), BinaryFunc::AddInt32)])
|
||||
.unwrap();
|
||||
// only retain the sum result
|
||||
let mfp = mfp.project(vec![4]).unwrap();
|
||||
// accept only if the sum is greater than 10
|
||||
let mfp = mfp
|
||||
.filter(vec![ScalarExpr::Column(0).call_binary(
|
||||
ScalarExpr::Literal(Value::from(10i32), ConcreteDataType::int32_datatype()),
|
||||
BinaryFunc::Gt,
|
||||
)])
|
||||
.unwrap();
|
||||
let mut input1 = vec![
|
||||
Value::from(4),
|
||||
Value::from(2),
|
||||
Value::from(3),
|
||||
Value::from("abc"),
|
||||
];
|
||||
let safe_mfp = SafeMfpPlan { mfp };
|
||||
let ret = safe_mfp
|
||||
.evaluate_into(&mut input1, &mut Row::empty())
|
||||
.unwrap();
|
||||
assert_eq!(ret, None);
|
||||
let mut input2 = vec![
|
||||
Value::from(5),
|
||||
Value::from(2),
|
||||
Value::from(4),
|
||||
Value::from("abc"),
|
||||
];
|
||||
let ret = safe_mfp
|
||||
.evaluate_into(&mut input2, &mut Row::empty())
|
||||
.unwrap();
|
||||
assert_eq!(ret, Some(Row::pack(vec![Value::from(11)])));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_permute() {
|
||||
let mfp = MapFilterProject::new(3)
|
||||
.map(vec![
|
||||
ScalarExpr::Column(0).call_binary(ScalarExpr::Column(1), BinaryFunc::Lt)
|
||||
])
|
||||
.unwrap()
|
||||
.filter(vec![
|
||||
ScalarExpr::Column(0).call_binary(ScalarExpr::Column(1), BinaryFunc::Gt)
|
||||
])
|
||||
.unwrap()
|
||||
.project(vec![0, 1])
|
||||
.unwrap();
|
||||
assert_eq!(mfp.demand(), BTreeSet::from([0, 1]));
|
||||
let mut less = mfp.clone();
|
||||
less.permute(BTreeMap::from([(1, 0), (0, 1)]), 2).unwrap();
|
||||
|
||||
let mut more = mfp.clone();
|
||||
more.permute(BTreeMap::from([(0, 1), (1, 2), (2, 0)]), 4)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mfp_test_cast_and_filter() {
|
||||
let mfp = MapFilterProject::new(3)
|
||||
.map(vec![ScalarExpr::Column(0).call_unary(UnaryFunc::Cast(
|
||||
ConcreteDataType::int32_datatype(),
|
||||
))])
|
||||
.unwrap()
|
||||
.filter(vec![
|
||||
ScalarExpr::Column(3).call_binary(ScalarExpr::Column(1), BinaryFunc::Gt)
|
||||
])
|
||||
.unwrap()
|
||||
.project([0, 1, 2])
|
||||
.unwrap();
|
||||
let mut input1 = vec![
|
||||
Value::from(4i64),
|
||||
Value::from(2),
|
||||
Value::from(3),
|
||||
Value::from(53),
|
||||
];
|
||||
let safe_mfp = SafeMfpPlan { mfp };
|
||||
let ret = safe_mfp.evaluate_into(&mut input1, &mut Row::empty());
|
||||
assert!(matches!(ret, Err(EvalError::InvalidArgument { .. })));
|
||||
|
||||
let input2 = vec![Value::from(4i64), Value::from(2), Value::from(3)];
|
||||
let ret = safe_mfp
|
||||
.evaluate_into(&mut input2.clone(), &mut Row::empty())
|
||||
.unwrap();
|
||||
assert_eq!(ret, Some(Row::new(input2)));
|
||||
|
||||
let mut input3 = vec![Value::from(4i64), Value::from(5), Value::from(2)];
|
||||
let ret = safe_mfp
|
||||
.evaluate_into(&mut input3, &mut Row::empty())
|
||||
.unwrap();
|
||||
assert_eq!(ret, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_mfp_out_of_order() {
|
||||
let mfp = MapFilterProject::new(3)
|
||||
.project(vec![2, 1, 0])
|
||||
.unwrap()
|
||||
.filter(vec![
|
||||
ScalarExpr::Column(0).call_binary(ScalarExpr::Column(1), BinaryFunc::Gt)
|
||||
])
|
||||
.unwrap()
|
||||
.map(vec![
|
||||
ScalarExpr::Column(0).call_binary(ScalarExpr::Column(1), BinaryFunc::Lt)
|
||||
])
|
||||
.unwrap()
|
||||
.project(vec![3])
|
||||
.unwrap();
|
||||
let mut input1 = vec![Value::from(2), Value::from(3), Value::from(4)];
|
||||
let safe_mfp = SafeMfpPlan { mfp };
|
||||
let ret = safe_mfp.evaluate_into(&mut input1, &mut Row::empty());
|
||||
assert_eq!(ret.unwrap(), Some(Row::new(vec![Value::from(false)])));
|
||||
}
|
||||
#[test]
|
||||
fn test_mfp_chore() {
|
||||
// project keeps composing permutations until the result is the identity permutation
|
||||
let mfp = MapFilterProject::new(3)
|
||||
.project([1, 2, 0])
|
||||
.unwrap()
|
||||
.project([1, 2, 0])
|
||||
.unwrap()
|
||||
.project([1, 2, 0])
|
||||
.unwrap();
|
||||
assert_eq!(mfp, MapFilterProject::new(3));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,7 +21,7 @@ mod accum;
|
||||
mod func;
|
||||
|
||||
/// Describes an aggregation expression.
|
||||
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
|
||||
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)]
|
||||
pub struct AggregateExpr {
|
||||
/// Names the aggregation function.
|
||||
pub func: AggregateFunc,
|
||||
|
||||
@@ -14,7 +14,10 @@
|
||||
|
||||
//! Accumulators for aggregate functions that are accumulatable, i.e. sum/count.
|
||||
//!
|
||||
//! Currently supports sum, count, any, all
|
||||
//! An accumulator is only restored from a row and updated each time the dataflow needs to process a new batch of rows.
|
||||
//! So the overhead is acceptable.
|
||||
//!
|
||||
//! Currently supports sum, count, any, all and min/max (with one caveat: min/max can't support deletes in the aggregate).
|
||||
|
||||
use std::fmt::Display;
|
||||
|
||||
@@ -22,13 +25,506 @@ use common_decimal::Decimal128;
|
||||
use common_time::{Date, DateTime};
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::value::{OrderedF32, OrderedF64, OrderedFloat, Value};
|
||||
use enum_dispatch::enum_dispatch;
|
||||
use hydroflow::futures::stream::Concat;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::expr::error::{InternalSnafu, TryFromValueSnafu, TypeMismatchSnafu};
|
||||
use crate::expr::error::{InternalSnafu, OverflowSnafu, TryFromValueSnafu, TypeMismatchSnafu};
|
||||
use crate::expr::relation::func::GenericFn;
|
||||
use crate::expr::{AggregateFunc, EvalError};
|
||||
use crate::repr::Diff;
|
||||
|
||||
/// Accumulates values for the various types of accumulable aggregations.
|
||||
#[enum_dispatch]
|
||||
pub trait Accumulator: Sized {
|
||||
fn into_state(self) -> Vec<Value>;
|
||||
fn update(
|
||||
&mut self,
|
||||
aggr_fn: &AggregateFunc,
|
||||
value: Value,
|
||||
diff: Diff,
|
||||
) -> Result<(), EvalError>;
|
||||
|
||||
fn update_batch<I>(&mut self, aggr_fn: &AggregateFunc, value_diffs: I) -> Result<(), EvalError>
|
||||
where
|
||||
I: IntoIterator<Item = (Value, Diff)>,
|
||||
{
|
||||
for (v, d) in value_diffs {
|
||||
self.update(aggr_fn, v, d)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn eval(&self, aggr_fn: &AggregateFunc) -> Result<Value, EvalError>;
|
||||
}
|
||||
|
||||
/// Bool accumulator, used for `Any` `All` `Max/MinBool`
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
pub struct Bool {
|
||||
/// The number of `true` values observed.
|
||||
trues: Diff,
|
||||
/// The number of `false` values observed.
|
||||
falses: Diff,
|
||||
}
|
||||
|
||||
impl TryFrom<Vec<Value>> for Bool {
|
||||
type Error = EvalError;
|
||||
|
||||
fn try_from(state: Vec<Value>) -> Result<Self, Self::Error> {
|
||||
ensure!(
|
||||
state.len() == 2,
|
||||
InternalSnafu {
|
||||
reason: "Bool Accumulator state should have 2 values",
|
||||
}
|
||||
);
|
||||
|
||||
let mut iter = state.into_iter();
|
||||
|
||||
Ok(Self {
|
||||
trues: Diff::try_from(iter.next().unwrap()).map_err(err_try_from_val)?,
|
||||
falses: Diff::try_from(iter.next().unwrap()).map_err(err_try_from_val)?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Accumulator for Bool {
|
||||
fn into_state(self) -> Vec<Value> {
|
||||
vec![self.trues.into(), self.falses.into()]
|
||||
}
|
||||
|
||||
/// Null values are ignored
|
||||
fn update(
|
||||
&mut self,
|
||||
aggr_fn: &AggregateFunc,
|
||||
value: Value,
|
||||
diff: Diff,
|
||||
) -> Result<(), EvalError> {
|
||||
ensure!(
|
||||
matches!(
|
||||
aggr_fn,
|
||||
AggregateFunc::Any
|
||||
| AggregateFunc::All
|
||||
| AggregateFunc::MaxBool
|
||||
| AggregateFunc::MinBool
|
||||
),
|
||||
InternalSnafu {
|
||||
reason: format!(
|
||||
"Bool Accumulator does not support this aggregation function: {:?}",
|
||||
aggr_fn
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
match value {
|
||||
Value::Boolean(true) => self.trues += diff,
|
||||
Value::Boolean(false) => self.falses += diff,
|
||||
Value::Null => (), // ignore nulls
|
||||
x => {
|
||||
return Err(TypeMismatchSnafu {
|
||||
expected: ConcreteDataType::boolean_datatype(),
|
||||
actual: x.data_type(),
|
||||
}
|
||||
.build());
|
||||
}
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn eval(&self, aggr_fn: &AggregateFunc) -> Result<Value, EvalError> {
|
||||
match aggr_fn {
|
||||
AggregateFunc::Any => Ok(Value::from(self.trues > 0)),
|
||||
AggregateFunc::All => Ok(Value::from(self.falses == 0)),
|
||||
AggregateFunc::MaxBool => Ok(Value::from(self.trues > 0)),
|
||||
AggregateFunc::MinBool => Ok(Value::from(self.falses == 0)),
|
||||
_ => Err(InternalSnafu {
|
||||
reason: format!(
|
||||
"Bool Accumulator does not support this aggregation function: {:?}",
|
||||
aggr_fn
|
||||
),
|
||||
}
|
||||
.build()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Accumulates simple numeric values for sum over integer.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
pub struct SimpleNumber {
|
||||
/// The accumulation of all non-NULL values observed.
|
||||
accum: i128,
|
||||
/// The number of non-NULL values observed.
|
||||
non_nulls: Diff,
|
||||
}
|
||||
|
||||
impl TryFrom<Vec<Value>> for SimpleNumber {
|
||||
type Error = EvalError;
|
||||
|
||||
fn try_from(state: Vec<Value>) -> Result<Self, Self::Error> {
|
||||
ensure!(
|
||||
state.len() == 2,
|
||||
InternalSnafu {
|
||||
reason: "Number Accumulator state should have 2 values",
|
||||
}
|
||||
);
|
||||
let mut iter = state.into_iter();
|
||||
|
||||
Ok(Self {
|
||||
accum: Decimal128::try_from(iter.next().unwrap())
|
||||
.map_err(err_try_from_val)?
|
||||
.val(),
|
||||
non_nulls: Diff::try_from(iter.next().unwrap()).map_err(err_try_from_val)?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Accumulator for SimpleNumber {
|
||||
fn into_state(self) -> Vec<Value> {
|
||||
vec![
|
||||
Value::Decimal128(Decimal128::new(self.accum, 38, 0)),
|
||||
self.non_nulls.into(),
|
||||
]
|
||||
}
|
||||
|
||||
fn update(
|
||||
&mut self,
|
||||
aggr_fn: &AggregateFunc,
|
||||
value: Value,
|
||||
diff: Diff,
|
||||
) -> Result<(), EvalError> {
|
||||
ensure!(
|
||||
matches!(
|
||||
aggr_fn,
|
||||
AggregateFunc::SumInt16
|
||||
| AggregateFunc::SumInt32
|
||||
| AggregateFunc::SumInt64
|
||||
| AggregateFunc::SumUInt16
|
||||
| AggregateFunc::SumUInt32
|
||||
| AggregateFunc::SumUInt64
|
||||
),
|
||||
InternalSnafu {
|
||||
reason: format!(
|
||||
"SimpleNumber Accumulator does not support this aggregation function: {:?}",
|
||||
aggr_fn
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let v = match (aggr_fn, value) {
|
||||
(AggregateFunc::SumInt16, Value::Int16(x)) => i128::from(x),
|
||||
(AggregateFunc::SumInt32, Value::Int32(x)) => i128::from(x),
|
||||
(AggregateFunc::SumInt64, Value::Int64(x)) => i128::from(x),
|
||||
(AggregateFunc::SumUInt16, Value::UInt16(x)) => i128::from(x),
|
||||
(AggregateFunc::SumUInt32, Value::UInt32(x)) => i128::from(x),
|
||||
(AggregateFunc::SumUInt64, Value::UInt64(x)) => i128::from(x),
|
||||
(_f, Value::Null) => return Ok(()), // ignore null
|
||||
(f, v) => {
|
||||
let expected_datatype = f.signature().input;
|
||||
return Err(TypeMismatchSnafu {
|
||||
expected: expected_datatype,
|
||||
actual: v.data_type(),
|
||||
}
|
||||
.build())?;
|
||||
}
|
||||
};
|
||||
|
||||
self.accum += v * i128::from(diff);
|
||||
|
||||
self.non_nulls += diff;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn eval(&self, aggr_fn: &AggregateFunc) -> Result<Value, EvalError> {
|
||||
match aggr_fn {
|
||||
AggregateFunc::SumInt16 | AggregateFunc::SumInt32 | AggregateFunc::SumInt64 => {
|
||||
i64::try_from(self.accum)
|
||||
.map_err(|_e| OverflowSnafu {}.build())
|
||||
.map(Value::from)
|
||||
}
|
||||
AggregateFunc::SumUInt16 | AggregateFunc::SumUInt32 | AggregateFunc::SumUInt64 => {
|
||||
u64::try_from(self.accum)
|
||||
.map_err(|_e| OverflowSnafu {}.build())
|
||||
.map(Value::from)
|
||||
}
|
||||
_ => Err(InternalSnafu {
|
||||
reason: format!(
|
||||
"SimpleNumber Accumulator does not support this aggregation function: {:?}",
|
||||
aggr_fn
|
||||
),
|
||||
}
|
||||
.build()),
|
||||
}
|
||||
}
|
||||
}
|
||||
/// Accumulates float values for sum over floating-point numbers.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
|
||||
pub struct Float {
|
||||
/// Accumulates non-special float values, i.e. not NaN, +inf, -inf.
|
||||
/// accum will be set to zero if `non_nulls` is zero.
|
||||
accum: OrderedF64,
|
||||
/// Counts +inf
|
||||
pos_infs: Diff,
|
||||
/// Counts -inf
|
||||
neg_infs: Diff,
|
||||
/// Counts NaNs
|
||||
nans: Diff,
|
||||
/// Counts non-NULL values
|
||||
non_nulls: Diff,
|
||||
}
|
||||
|
||||
impl TryFrom<Vec<Value>> for Float {
|
||||
type Error = EvalError;
|
||||
|
||||
fn try_from(state: Vec<Value>) -> Result<Self, Self::Error> {
|
||||
ensure!(
|
||||
state.len() == 5,
|
||||
InternalSnafu {
|
||||
reason: "Float Accumulator state should have 5 values",
|
||||
}
|
||||
);
|
||||
|
||||
let mut iter = state.into_iter();
|
||||
|
||||
let mut ret = Self {
|
||||
accum: OrderedF64::try_from(iter.next().unwrap()).map_err(err_try_from_val)?,
|
||||
pos_infs: Diff::try_from(iter.next().unwrap()).map_err(err_try_from_val)?,
|
||||
neg_infs: Diff::try_from(iter.next().unwrap()).map_err(err_try_from_val)?,
|
||||
nans: Diff::try_from(iter.next().unwrap()).map_err(err_try_from_val)?,
|
||||
non_nulls: Diff::try_from(iter.next().unwrap()).map_err(err_try_from_val)?,
|
||||
};
|
||||
|
||||
// This prevents the counter-intuitive behavior of summing over no values
|
||||
if ret.non_nulls == 0 {
|
||||
ret.accum = OrderedFloat::from(0.0);
|
||||
}
|
||||
|
||||
Ok(ret)
|
||||
}
|
||||
}
|
||||
|
||||
impl Accumulator for Float {
|
||||
fn into_state(self) -> Vec<Value> {
|
||||
vec![
|
||||
self.accum.into(),
|
||||
self.pos_infs.into(),
|
||||
self.neg_infs.into(),
|
||||
self.nans.into(),
|
||||
self.non_nulls.into(),
|
||||
]
|
||||
}
|
||||
|
||||
/// sum ignores null
|
||||
fn update(
|
||||
&mut self,
|
||||
aggr_fn: &AggregateFunc,
|
||||
value: Value,
|
||||
diff: Diff,
|
||||
) -> Result<(), EvalError> {
|
||||
ensure!(
|
||||
matches!(
|
||||
aggr_fn,
|
||||
AggregateFunc::SumFloat32 | AggregateFunc::SumFloat64
|
||||
),
|
||||
InternalSnafu {
|
||||
reason: format!(
|
||||
"Float Accumulator does not support this aggregation function: {:?}",
|
||||
aggr_fn
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let x = match (aggr_fn, value) {
|
||||
(AggregateFunc::SumFloat32, Value::Float32(x)) => OrderedF64::from(*x as f64),
|
||||
(AggregateFunc::SumFloat64, Value::Float64(x)) => OrderedF64::from(x),
|
||||
(_f, Value::Null) => return Ok(()), // ignore null
|
||||
(f, v) => {
|
||||
let expected_datatype = f.signature().input;
|
||||
return Err(TypeMismatchSnafu {
|
||||
expected: expected_datatype,
|
||||
actual: v.data_type(),
|
||||
}
|
||||
.build())?;
|
||||
}
|
||||
};
|
||||
|
||||
if x.is_nan() {
|
||||
self.nans += diff;
|
||||
} else if x.is_infinite() {
|
||||
if x.is_sign_positive() {
|
||||
self.pos_infs += diff;
|
||||
} else {
|
||||
self.neg_infs += diff;
|
||||
}
|
||||
} else {
|
||||
self.accum += *(x * OrderedF64::from(diff as f64));
|
||||
}
|
||||
|
||||
self.non_nulls += diff;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn eval(&self, aggr_fn: &AggregateFunc) -> Result<Value, EvalError> {
|
||||
match aggr_fn {
|
||||
AggregateFunc::SumFloat32 => Ok(Value::Float32(OrderedF32::from(self.accum.0 as f32))),
|
||||
AggregateFunc::SumFloat64 => Ok(Value::Float64(self.accum)),
|
||||
_ => Err(InternalSnafu {
|
||||
reason: format!(
|
||||
"Float Accumulator does not support this aggregation function: {:?}",
|
||||
aggr_fn
|
||||
),
|
||||
}
|
||||
.build()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Accumulates a single `Ord`ed `Value`, useful for min/max aggregations.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
pub struct OrdValue {
|
||||
val: Option<Value>,
|
||||
non_nulls: Diff,
|
||||
}
|
||||
|
||||
impl TryFrom<Vec<Value>> for OrdValue {
|
||||
type Error = EvalError;
|
||||
|
||||
fn try_from(state: Vec<Value>) -> Result<Self, Self::Error> {
|
||||
ensure!(
|
||||
state.len() == 2,
|
||||
InternalSnafu {
|
||||
reason: "OrdValue Accumulator state should have 2 values",
|
||||
}
|
||||
);
|
||||
|
||||
let mut iter = state.into_iter();
|
||||
|
||||
Ok(Self {
|
||||
val: {
|
||||
let v = iter.next().unwrap();
|
||||
if v == Value::Null {
|
||||
None
|
||||
} else {
|
||||
Some(v)
|
||||
}
|
||||
},
|
||||
non_nulls: Diff::try_from(iter.next().unwrap()).map_err(err_try_from_val)?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Accumulator for OrdValue {
|
||||
fn into_state(self) -> Vec<Value> {
|
||||
vec![self.val.unwrap_or(Value::Null), self.non_nulls.into()]
|
||||
}
|
||||
|
||||
/// min/max look for a result among all non-null values; if all values are null, the result is null.
|
||||
/// count(col_name) gives the number of non-null values, count(*) gives the number of rows including nulls.
|
||||
/// TODO(discord9): add count(*) as an aggr function
|
||||
fn update(
|
||||
&mut self,
|
||||
aggr_fn: &AggregateFunc,
|
||||
value: Value,
|
||||
diff: Diff,
|
||||
) -> Result<(), EvalError> {
|
||||
ensure!(
|
||||
aggr_fn.is_max() || aggr_fn.is_min() || matches!(aggr_fn, AggregateFunc::Count),
|
||||
InternalSnafu {
|
||||
reason: format!(
|
||||
"OrdValue Accumulator does not support this aggregation function: {:?}",
|
||||
aggr_fn
|
||||
),
|
||||
}
|
||||
);
|
||||
if diff <= 0 && (aggr_fn.is_max() || aggr_fn.is_min()) {
|
||||
return Err(InternalSnafu {
|
||||
reason: "OrdValue Accumulator does not support non-monotonic input for min/max aggregation".to_string(),
|
||||
}.build());
|
||||
}
|
||||
|
||||
// if aggr_fn is count, the incoming value type doesn't matter in type checking
|
||||
// otherwise, the type needs to match or the value must be null
|
||||
let check_type_aggr_fn_and_arg_value =
|
||||
ty_eq_without_precision(value.data_type(), aggr_fn.signature().input)
|
||||
|| matches!(aggr_fn, AggregateFunc::Count)
|
||||
|| value.is_null();
|
||||
let check_type_aggr_fn_and_self_val = self
|
||||
.val
|
||||
.as_ref()
|
||||
.map(|zelf| ty_eq_without_precision(zelf.data_type(), aggr_fn.signature().input))
|
||||
.unwrap_or(true)
|
||||
|| matches!(aggr_fn, AggregateFunc::Count);
|
||||
|
||||
if !check_type_aggr_fn_and_arg_value {
|
||||
return Err(TypeMismatchSnafu {
|
||||
expected: aggr_fn.signature().input,
|
||||
actual: value.data_type(),
|
||||
}
|
||||
.build());
|
||||
} else if !check_type_aggr_fn_and_self_val {
|
||||
return Err(TypeMismatchSnafu {
|
||||
expected: aggr_fn.signature().input,
|
||||
actual: self
|
||||
.val
|
||||
.as_ref()
|
||||
.map(|v| v.data_type())
|
||||
.unwrap_or(ConcreteDataType::null_datatype()),
|
||||
}
|
||||
.build());
|
||||
}
|
||||
|
||||
let is_null = value.is_null();
|
||||
if is_null {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if !is_null {
|
||||
// compile count(*) to count(true) to include null/non-nulls
|
||||
// And the counts of non-null values are updated here
|
||||
self.non_nulls += diff;
|
||||
|
||||
match aggr_fn.signature().generic_fn {
|
||||
GenericFn::Max => {
|
||||
self.val = self
|
||||
.val
|
||||
.clone()
|
||||
.map(|v| v.max(value.clone()))
|
||||
.or_else(|| Some(value))
|
||||
}
|
||||
GenericFn::Min => {
|
||||
self.val = self
|
||||
.val
|
||||
.clone()
|
||||
.map(|v| v.min(value.clone()))
|
||||
.or_else(|| Some(value))
|
||||
}
|
||||
|
||||
GenericFn::Count => (),
|
||||
_ => unreachable!("already checked by ensure!"),
|
||||
}
|
||||
};
|
||||
// min/max ignore nulls
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn eval(&self, aggr_fn: &AggregateFunc) -> Result<Value, EvalError> {
|
||||
if aggr_fn.is_max() || aggr_fn.is_min() {
|
||||
Ok(self.val.clone().unwrap_or(Value::Null))
|
||||
} else if matches!(aggr_fn, AggregateFunc::Count) {
|
||||
Ok(self.non_nulls.into())
|
||||
} else {
|
||||
Err(InternalSnafu {
|
||||
reason: format!(
|
||||
"OrdValue Accumulator does not support this aggregation function: {:?}",
|
||||
aggr_fn
|
||||
),
|
||||
}
|
||||
.build())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Accumulates values for the various types of accumulable aggregations.
|
||||
///
|
||||
/// We assume that there are not more than 2^32 elements for the aggregation.
|
||||
@@ -38,34 +534,407 @@ use crate::repr::Diff;
|
||||
/// The float accumulator performs accumulation with tolerance for floating point error.
|
||||
///
|
||||
/// TODO(discord9): check for overflowing
|
||||
#[enum_dispatch(Accumulator)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
pub enum Accum {
|
||||
/// Accumulates boolean values.
|
||||
Bool {
|
||||
/// The number of `true` values observed.
|
||||
trues: Diff,
|
||||
/// The number of `false` values observed.
|
||||
falses: Diff,
|
||||
},
|
||||
Bool(Bool),
|
||||
/// Accumulates simple numeric values.
|
||||
SimpleNumber {
|
||||
/// The accumulation of all non-NULL values observed.
|
||||
accum: i128,
|
||||
/// The number of non-NULL values observed.
|
||||
non_nulls: Diff,
|
||||
},
|
||||
SimpleNumber(SimpleNumber),
|
||||
/// Accumulates float values.
|
||||
Float {
|
||||
/// Accumulates non-special float values, i.e. not NaN, +inf, -inf.
|
||||
/// accum will be set to zero if `non_nulls` is zero.
|
||||
accum: OrderedF64,
|
||||
/// Counts +inf
|
||||
pos_infs: Diff,
|
||||
/// Counts -inf
|
||||
neg_infs: Diff,
|
||||
/// Counts NaNs
|
||||
nans: Diff,
|
||||
/// Counts non-NULL values
|
||||
non_nulls: Diff,
|
||||
},
|
||||
Float(Float),
|
||||
/// Accumulate Values that impl `Ord`
|
||||
OrdValue(OrdValue),
|
||||
}
|
||||
|
||||
impl Accum {
|
||||
pub fn new_accum(aggr_fn: &AggregateFunc) -> Result<Self, EvalError> {
|
||||
Ok(match aggr_fn {
|
||||
AggregateFunc::Any
|
||||
| AggregateFunc::All
|
||||
| AggregateFunc::MaxBool
|
||||
| AggregateFunc::MinBool => Self::from(Bool {
|
||||
trues: 0,
|
||||
falses: 0,
|
||||
}),
|
||||
AggregateFunc::SumInt16
|
||||
| AggregateFunc::SumInt32
|
||||
| AggregateFunc::SumInt64
|
||||
| AggregateFunc::SumUInt16
|
||||
| AggregateFunc::SumUInt32
|
||||
| AggregateFunc::SumUInt64 => Self::from(SimpleNumber {
|
||||
accum: 0,
|
||||
non_nulls: 0,
|
||||
}),
|
||||
AggregateFunc::SumFloat32 | AggregateFunc::SumFloat64 => Self::from(Float {
|
||||
accum: OrderedF64::from(0.0),
|
||||
pos_infs: 0,
|
||||
neg_infs: 0,
|
||||
nans: 0,
|
||||
non_nulls: 0,
|
||||
}),
|
||||
f if f.is_max() || f.is_min() || matches!(f, AggregateFunc::Count) => {
|
||||
Self::from(OrdValue {
|
||||
val: None,
|
||||
non_nulls: 0,
|
||||
})
|
||||
}
|
||||
f => {
|
||||
return Err(InternalSnafu {
|
||||
reason: format!(
|
||||
"Accumulator does not support this aggregation function: {:?}",
|
||||
f
|
||||
),
|
||||
}
|
||||
.build());
|
||||
}
|
||||
})
|
||||
}
|
||||
pub fn try_into_accum(aggr_fn: &AggregateFunc, state: Vec<Value>) -> Result<Self, EvalError> {
|
||||
match aggr_fn {
|
||||
AggregateFunc::Any
|
||||
| AggregateFunc::All
|
||||
| AggregateFunc::MaxBool
|
||||
| AggregateFunc::MinBool => Ok(Self::from(Bool::try_from(state)?)),
|
||||
AggregateFunc::SumInt16
|
||||
| AggregateFunc::SumInt32
|
||||
| AggregateFunc::SumInt64
|
||||
| AggregateFunc::SumUInt16
|
||||
| AggregateFunc::SumUInt32
|
||||
| AggregateFunc::SumUInt64 => Ok(Self::from(SimpleNumber::try_from(state)?)),
|
||||
AggregateFunc::SumFloat32 | AggregateFunc::SumFloat64 => {
|
||||
Ok(Self::from(Float::try_from(state)?))
|
||||
}
|
||||
f if f.is_max() || f.is_min() || matches!(f, AggregateFunc::Count) => {
|
||||
Ok(Self::from(OrdValue::try_from(state)?))
|
||||
}
|
||||
f => Err(InternalSnafu {
|
||||
reason: format!(
|
||||
"Accumulator does not support this aggregation function: {:?}",
|
||||
f
|
||||
),
|
||||
}
|
||||
.build()),
|
||||
}
|
||||
}
|
||||
}
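A sketch of the intended round-trip through these constructors (create, update, persist as a state row, restore, evaluate), mirroring how `eval_diff_accumulable` uses them later in this diff; it assumes `Accum`, `Accumulator`, `AggregateFunc`, `Value` and `EvalError` from this crate are in scope.

fn sum_round_trip_sketch() -> Result<(), EvalError> {
    // Fresh accumulator for SUM over Int32 values.
    let mut acc = Accum::new_accum(&AggregateFunc::SumInt32)?;
    // Apply a batch of (value, diff) updates; the null is ignored.
    acc.update_batch(
        &AggregateFunc::SumInt32,
        vec![(Value::Int32(1), 1), (Value::Int32(2), 1), (Value::Null, 1)],
    )?;
    assert_eq!(acc.eval(&AggregateFunc::SumInt32)?, Value::Int64(3));

    // Persist the accumulator as a row of `Value`s and restore it later.
    let state = acc.into_state();
    let restored = Accum::try_into_accum(&AggregateFunc::SumInt32, state)?;
    assert_eq!(restored.eval(&AggregateFunc::SumInt32)?, Value::Int64(3));
    Ok(())
}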
|
||||
|
||||
fn err_try_from_val<T: Display>(reason: T) -> EvalError {
|
||||
TryFromValueSnafu {
|
||||
msg: reason.to_string(),
|
||||
}
|
||||
.build()
|
||||
}
|
||||
|
||||
/// compare type while ignore their precision, including `TimeStamp`, `Time`,
|
||||
/// `Duration`, `Interval`
|
||||
fn ty_eq_without_precision(left: ConcreteDataType, right: ConcreteDataType) -> bool {
|
||||
left == right
|
||||
|| matches!(left, ConcreteDataType::Timestamp(..))
|
||||
&& matches!(right, ConcreteDataType::Timestamp(..))
|
||||
|| matches!(left, ConcreteDataType::Time(..)) && matches!(right, ConcreteDataType::Time(..))
|
||||
|| matches!(left, ConcreteDataType::Duration(..))
|
||||
&& matches!(right, ConcreteDataType::Duration(..))
|
||||
|| matches!(left, ConcreteDataType::Interval(..))
|
||||
&& matches!(right, ConcreteDataType::Interval(..))
|
||||
}
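A short sketch of what "ignoring precision" means for this helper. `timestamp_second_datatype` already appears elsewhere in this diff; `timestamp_millisecond_datatype` is assumed to be the analogous constructor in the datatypes crate.

#[test]
fn ty_eq_without_precision_sketch() {
    // Both sides are timestamps, differing only in time unit, so they compare equal here.
    assert!(ty_eq_without_precision(
        ConcreteDataType::timestamp_second_datatype(),
        ConcreteDataType::timestamp_millisecond_datatype(),
    ));
    // Different type families are still different.
    assert!(!ty_eq_without_precision(
        ConcreteDataType::timestamp_second_datatype(),
        ConcreteDataType::int64_datatype(),
    ));
}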
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
#[test]
|
||||
fn test_accum() {
|
||||
let testcases = vec![
|
||||
(
|
||||
AggregateFunc::SumInt32,
|
||||
vec![(Value::Int32(1), 1), (Value::Null, 1)],
|
||||
(
|
||||
Value::Int64(1),
|
||||
vec![Value::Decimal128(Decimal128::new(1, 38, 0)), 1i64.into()],
|
||||
),
|
||||
),
|
||||
(
|
||||
AggregateFunc::SumFloat32,
|
||||
vec![(Value::Float32(OrderedF32::from(1.0)), 1), (Value::Null, 1)],
|
||||
(
|
||||
Value::Float32(OrderedF32::from(1.0)),
|
||||
vec![
|
||||
Value::Float64(OrderedF64::from(1.0)),
|
||||
0i64.into(),
|
||||
0i64.into(),
|
||||
0i64.into(),
|
||||
1i64.into(),
|
||||
],
|
||||
),
|
||||
),
|
||||
(
|
||||
AggregateFunc::MaxInt32,
|
||||
vec![(Value::Int32(1), 1), (Value::Int32(2), 1), (Value::Null, 1)],
|
||||
(Value::Int32(2), vec![Value::Int32(2), 2i64.into()]),
|
||||
),
|
||||
(
|
||||
AggregateFunc::MinInt32,
|
||||
vec![(Value::Int32(2), 1), (Value::Int32(1), 1), (Value::Null, 1)],
|
||||
(Value::Int32(1), vec![Value::Int32(1), 2i64.into()]),
|
||||
),
|
||||
(
|
||||
AggregateFunc::MaxFloat32,
|
||||
vec![
|
||||
(Value::Float32(OrderedF32::from(1.0)), 1),
|
||||
(Value::Float32(OrderedF32::from(2.0)), 1),
|
||||
(Value::Null, 1),
|
||||
],
|
||||
(
|
||||
Value::Float32(OrderedF32::from(2.0)),
|
||||
vec![Value::Float32(OrderedF32::from(2.0)), 2i64.into()],
|
||||
),
|
||||
),
|
||||
(
|
||||
AggregateFunc::MaxDateTime,
|
||||
vec![
|
||||
(Value::DateTime(DateTime::from(0)), 1),
|
||||
(Value::DateTime(DateTime::from(1)), 1),
|
||||
(Value::Null, 1),
|
||||
],
|
||||
(
|
||||
Value::DateTime(DateTime::from(1)),
|
||||
vec![Value::DateTime(DateTime::from(1)), 2i64.into()],
|
||||
),
|
||||
),
|
||||
(
|
||||
AggregateFunc::Count,
|
||||
vec![
|
||||
(Value::Int32(1), 1),
|
||||
(Value::Int32(2), 1),
|
||||
(Value::Null, 1),
|
||||
(Value::Null, 1),
|
||||
],
|
||||
(2i64.into(), vec![Value::Null, 2i64.into()]),
|
||||
),
|
||||
(
|
||||
AggregateFunc::Any,
|
||||
vec![
|
||||
(Value::Boolean(false), 1),
|
||||
(Value::Boolean(false), 1),
|
||||
(Value::Boolean(true), 1),
|
||||
(Value::Null, 1),
|
||||
],
|
||||
(
|
||||
Value::Boolean(true),
|
||||
vec![Value::from(1i64), Value::from(2i64)],
|
||||
),
|
||||
),
|
||||
(
|
||||
AggregateFunc::All,
|
||||
vec![
|
||||
(Value::Boolean(false), 1),
|
||||
(Value::Boolean(false), 1),
|
||||
(Value::Boolean(true), 1),
|
||||
(Value::Null, 1),
|
||||
],
|
||||
(
|
||||
Value::Boolean(false),
|
||||
vec![Value::from(1i64), Value::from(2i64)],
|
||||
),
|
||||
),
|
||||
(
|
||||
AggregateFunc::MaxBool,
|
||||
vec![
|
||||
(Value::Boolean(false), 1),
|
||||
(Value::Boolean(false), 1),
|
||||
(Value::Boolean(true), 1),
|
||||
(Value::Null, 1),
|
||||
],
|
||||
(
|
||||
Value::Boolean(true),
|
||||
vec![Value::from(1i64), Value::from(2i64)],
|
||||
),
|
||||
),
|
||||
(
|
||||
AggregateFunc::MinBool,
|
||||
vec![
|
||||
(Value::Boolean(false), 1),
|
||||
(Value::Boolean(false), 1),
|
||||
(Value::Boolean(true), 1),
|
||||
(Value::Null, 1),
|
||||
],
|
||||
(
|
||||
Value::Boolean(false),
|
||||
vec![Value::from(1i64), Value::from(2i64)],
|
||||
),
|
||||
),
|
||||
];
|
||||
|
||||
for (aggr_fn, input, (eval_res, state)) in testcases {
|
||||
let create_and_insert = || -> Result<Accum, EvalError> {
|
||||
let mut acc = Accum::new_accum(&aggr_fn)?;
|
||||
acc.update_batch(&aggr_fn, input.clone())?;
|
||||
let row = acc.into_state();
|
||||
let acc = Accum::try_into_accum(&aggr_fn, row)?;
|
||||
Ok(acc)
|
||||
};
|
||||
let acc = match create_and_insert() {
|
||||
Ok(acc) => acc,
|
||||
Err(err) => panic!(
|
||||
"Failed to create accum for {:?} with input {:?} with error: {:?}",
|
||||
aggr_fn, input, err
|
||||
),
|
||||
};
|
||||
|
||||
if acc.eval(&aggr_fn).unwrap() != eval_res {
|
||||
panic!(
|
||||
"Failed to eval accum for {:?} with input {:?}, expect {:?}, got {:?}",
|
||||
aggr_fn,
|
||||
input,
|
||||
eval_res,
|
||||
acc.eval(&aggr_fn).unwrap()
|
||||
);
|
||||
}
|
||||
let actual_state = acc.into_state();
|
||||
if actual_state != state {
|
||||
panic!(
|
||||
"Failed to cast into state from accum for {:?} with input {:?}, expect state {:?}, got state {:?}",
|
||||
aggr_fn,
|
||||
input,
|
||||
state,
|
||||
actual_state
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
fn test_fail_path_accum() {
|
||||
{
|
||||
let bool_accum = Bool::try_from(vec![Value::Null]);
|
||||
assert!(matches!(bool_accum, Err(EvalError::Internal { .. })));
|
||||
}
|
||||
|
||||
{
|
||||
let mut bool_accum = Bool::try_from(vec![1i64.into(), 1i64.into()]).unwrap();
|
||||
// serde
|
||||
let bool_accum_serde = serde_json::to_string(&bool_accum).unwrap();
|
||||
let bool_accum_de = serde_json::from_str::<Bool>(&bool_accum_serde).unwrap();
|
||||
assert_eq!(bool_accum, bool_accum_de);
|
||||
assert!(matches!(
|
||||
bool_accum.update(&AggregateFunc::MaxDate, 1.into(), 1),
|
||||
Err(EvalError::Internal { .. })
|
||||
));
|
||||
assert!(matches!(
|
||||
bool_accum.update(&AggregateFunc::Any, 1.into(), 1),
|
||||
Err(EvalError::TypeMismatch { .. })
|
||||
));
|
||||
assert!(matches!(
|
||||
bool_accum.eval(&AggregateFunc::MaxDate),
|
||||
Err(EvalError::Internal { .. })
|
||||
));
|
||||
}
|
||||
|
||||
{
|
||||
let ret = SimpleNumber::try_from(vec![Value::Null]);
|
||||
assert!(matches!(ret, Err(EvalError::Internal { .. })));
|
||||
let mut accum =
|
||||
SimpleNumber::try_from(vec![Decimal128::new(0, 38, 0).into(), 0i64.into()])
|
||||
.unwrap();
|
||||
|
||||
assert!(matches!(
|
||||
accum.update(&AggregateFunc::All, 0.into(), 1),
|
||||
Err(EvalError::Internal { .. })
|
||||
));
|
||||
assert!(matches!(
|
||||
accum.update(&AggregateFunc::SumInt64, 0i32.into(), 1),
|
||||
Err(EvalError::TypeMismatch { .. })
|
||||
));
|
||||
assert!(matches!(
|
||||
accum.eval(&AggregateFunc::All),
|
||||
Err(EvalError::Internal { .. })
|
||||
));
|
||||
accum
|
||||
.update(&AggregateFunc::SumInt64, 1i64.into(), 1)
|
||||
.unwrap();
|
||||
accum
|
||||
.update(&AggregateFunc::SumInt64, i64::MAX.into(), 1)
|
||||
.unwrap();
|
||||
assert!(matches!(
|
||||
accum.eval(&AggregateFunc::SumInt64),
|
||||
Err(EvalError::Overflow { .. })
|
||||
));
|
||||
}
|
||||
|
||||
{
|
||||
let ret = Float::try_from(vec![2f64.into(), 0i64.into(), 0i64.into(), 0i64.into()]);
|
||||
assert!(matches!(ret, Err(EvalError::Internal { .. })));
|
||||
let mut accum = Float::try_from(vec![
|
||||
2f64.into(),
|
||||
0i64.into(),
|
||||
0i64.into(),
|
||||
0i64.into(),
|
||||
1i64.into(),
|
||||
])
|
||||
.unwrap();
|
||||
accum
|
||||
.update(&AggregateFunc::SumFloat64, 2f64.into(), -1)
|
||||
.unwrap();
|
||||
assert!(matches!(
|
||||
accum.update(&AggregateFunc::All, 0.into(), 1),
|
||||
Err(EvalError::Internal { .. })
|
||||
));
|
||||
assert!(matches!(
|
||||
accum.update(&AggregateFunc::SumFloat64, 0.0f32.into(), 1),
|
||||
Err(EvalError::TypeMismatch { .. })
|
||||
));
|
||||
// no record, no accum
|
||||
assert_eq!(
|
||||
accum.eval(&AggregateFunc::SumFloat64).unwrap(),
|
||||
0.0f64.into()
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
accum.eval(&AggregateFunc::All),
|
||||
Err(EvalError::Internal { .. })
|
||||
));
|
||||
|
||||
accum
|
||||
.update(&AggregateFunc::SumFloat64, f64::INFINITY.into(), 1)
|
||||
.unwrap();
|
||||
accum
|
||||
.update(&AggregateFunc::SumFloat64, (-f64::INFINITY).into(), 1)
|
||||
.unwrap();
|
||||
accum
|
||||
.update(&AggregateFunc::SumFloat64, f64::NAN.into(), 1)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
{
|
||||
let ret = OrdValue::try_from(vec![Value::Null]);
|
||||
assert!(matches!(ret, Err(EvalError::Internal { .. })));
|
||||
let mut accum = OrdValue::try_from(vec![Value::Null, 0i64.into()]).unwrap();
|
||||
assert!(matches!(
|
||||
accum.update(&AggregateFunc::All, 0.into(), 1),
|
||||
Err(EvalError::Internal { .. })
|
||||
));
|
||||
accum
|
||||
.update(&AggregateFunc::MaxInt16, 1i16.into(), 1)
|
||||
.unwrap();
|
||||
assert!(matches!(
|
||||
accum.update(&AggregateFunc::MaxInt16, 0i32.into(), 1),
|
||||
Err(EvalError::TypeMismatch { .. })
|
||||
));
|
||||
assert!(matches!(
|
||||
accum.update(&AggregateFunc::MaxInt16, 0i16.into(), -1),
|
||||
Err(EvalError::Internal { .. })
|
||||
));
|
||||
accum
|
||||
.update(&AggregateFunc::MaxInt16, Value::Null, 1)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
// inserting uint64 into max_int64 should fail
|
||||
{
|
||||
let mut accum = OrdValue::try_from(vec![Value::Null, 0i64.into()]).unwrap();
|
||||
assert!(matches!(
|
||||
accum.update(&AggregateFunc::MaxInt64, 0u64.into(), 1),
|
||||
Err(EvalError::TypeMismatch { .. })
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,15 +12,13 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::type_name;
|
||||
|
||||
use common_time::{Date, DateTime};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::value::{OrderedF32, OrderedF64, Value};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::expr::error::{EvalError, TryFromValueSnafu, TypeMismatchSnafu};
|
||||
use crate::expr::relation::accum::Accum;
|
||||
use crate::expr::relation::accum::{Accum, Accumulator};
|
||||
use crate::repr::Diff;
|
||||
|
||||
/// Aggregate functions that can be applied to a group of rows.
|
||||
@@ -83,3 +81,280 @@ pub enum AggregateFunc {
|
||||
Any,
|
||||
All,
|
||||
}
|
||||
|
||||
impl AggregateFunc {
|
||||
pub fn is_max(&self) -> bool {
|
||||
self.signature().generic_fn == GenericFn::Max
|
||||
}
|
||||
|
||||
pub fn is_min(&self) -> bool {
|
||||
self.signature().generic_fn == GenericFn::Min
|
||||
}
|
||||
|
||||
pub fn is_sum(&self) -> bool {
|
||||
self.signature().generic_fn == GenericFn::Sum
|
||||
}
|
||||
|
||||
/// Eval value, diff with accumulator
|
||||
///
|
||||
/// Expects self to be an accumulable aggregate function, i.e. sum/count
|
||||
///
|
||||
/// TODO(discord9): deal with overflow & a better accumulator
|
||||
pub fn eval_diff_accumulable<I>(
|
||||
&self,
|
||||
accum: Vec<Value>,
|
||||
value_diffs: I,
|
||||
) -> Result<(Value, Vec<Value>), EvalError>
|
||||
where
|
||||
I: IntoIterator<Item = (Value, Diff)>,
|
||||
{
|
||||
let mut accum = if accum.is_empty() {
|
||||
Accum::new_accum(self)?
|
||||
} else {
|
||||
Accum::try_into_accum(self, accum)?
|
||||
};
|
||||
accum.update_batch(self, value_diffs)?;
|
||||
let res = accum.eval(self)?;
|
||||
Ok((res, accum.into_state()))
|
||||
}
|
||||
}
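A sketch of incremental use of `eval_diff_accumulable`: the first call starts from an empty state, later calls resume from the state row returned previously, and a negative diff retracts an earlier value. `Value` and `EvalError` are assumed to be in scope as above.

fn incremental_sum_sketch() -> Result<(), EvalError> {
    let aggr = AggregateFunc::SumInt64;
    // First batch, starting from an empty state.
    let (res, state) =
        aggr.eval_diff_accumulable(vec![], vec![(Value::Int64(1), 1), (Value::Int64(2), 1)])?;
    assert_eq!(res, Value::Int64(3));
    // Second batch resumes from the persisted state; (2, -1) retracts the earlier 2.
    let (res, _state) =
        aggr.eval_diff_accumulable(state, vec![(Value::Int64(4), 1), (Value::Int64(2), -1)])?;
    assert_eq!(res, Value::Int64(5));
    Ok(())
}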
|
||||
|
||||
pub struct Signature {
|
||||
pub input: ConcreteDataType,
|
||||
pub output: ConcreteDataType,
|
||||
pub generic_fn: GenericFn,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub enum GenericFn {
|
||||
Max,
|
||||
Min,
|
||||
Sum,
|
||||
Count,
|
||||
Any,
|
||||
All,
|
||||
}
|
||||
|
||||
impl AggregateFunc {
|
||||
/// All concrete datatypes with precision are returned as their largest possible variant;
|
||||
/// as an exception, count has a signature of `null -> i64`, but it's actually `anytype -> i64`
|
||||
pub fn signature(&self) -> Signature {
|
||||
match self {
|
||||
AggregateFunc::MaxInt16 => Signature {
|
||||
input: ConcreteDataType::int16_datatype(),
|
||||
output: ConcreteDataType::int16_datatype(),
|
||||
generic_fn: GenericFn::Max,
|
||||
},
|
||||
AggregateFunc::MaxInt32 => Signature {
|
||||
input: ConcreteDataType::int32_datatype(),
|
||||
output: ConcreteDataType::int32_datatype(),
|
||||
generic_fn: GenericFn::Max,
|
||||
},
|
||||
AggregateFunc::MaxInt64 => Signature {
|
||||
input: ConcreteDataType::int64_datatype(),
|
||||
output: ConcreteDataType::int64_datatype(),
|
||||
generic_fn: GenericFn::Max,
|
||||
},
|
||||
AggregateFunc::MaxUInt16 => Signature {
|
||||
input: ConcreteDataType::uint16_datatype(),
|
||||
output: ConcreteDataType::uint16_datatype(),
|
||||
generic_fn: GenericFn::Max,
|
||||
},
|
||||
AggregateFunc::MaxUInt32 => Signature {
|
||||
input: ConcreteDataType::uint32_datatype(),
|
||||
output: ConcreteDataType::uint32_datatype(),
|
||||
generic_fn: GenericFn::Max,
|
||||
},
|
||||
AggregateFunc::MaxUInt64 => Signature {
|
||||
input: ConcreteDataType::uint64_datatype(),
|
||||
output: ConcreteDataType::uint64_datatype(),
|
||||
generic_fn: GenericFn::Max,
|
||||
},
|
||||
AggregateFunc::MaxFloat32 => Signature {
|
||||
input: ConcreteDataType::float32_datatype(),
|
||||
output: ConcreteDataType::float32_datatype(),
|
||||
generic_fn: GenericFn::Max,
|
||||
},
|
||||
AggregateFunc::MaxFloat64 => Signature {
|
||||
input: ConcreteDataType::float64_datatype(),
|
||||
output: ConcreteDataType::float64_datatype(),
|
||||
generic_fn: GenericFn::Max,
|
||||
},
|
||||
AggregateFunc::MaxBool => Signature {
|
||||
input: ConcreteDataType::boolean_datatype(),
|
||||
output: ConcreteDataType::boolean_datatype(),
|
||||
generic_fn: GenericFn::Max,
|
||||
},
|
||||
AggregateFunc::MaxString => Signature {
|
||||
input: ConcreteDataType::string_datatype(),
|
||||
output: ConcreteDataType::string_datatype(),
|
||||
generic_fn: GenericFn::Max,
|
||||
},
|
||||
AggregateFunc::MaxDate => Signature {
|
||||
input: ConcreteDataType::date_datatype(),
|
||||
output: ConcreteDataType::date_datatype(),
|
||||
generic_fn: GenericFn::Max,
|
||||
},
|
||||
AggregateFunc::MaxDateTime => Signature {
|
||||
input: ConcreteDataType::datetime_datatype(),
|
||||
output: ConcreteDataType::datetime_datatype(),
|
||||
generic_fn: GenericFn::Max,
|
||||
},
|
||||
AggregateFunc::MaxTimestamp => Signature {
|
||||
input: ConcreteDataType::timestamp_second_datatype(),
|
||||
output: ConcreteDataType::timestamp_second_datatype(),
|
||||
generic_fn: GenericFn::Max,
|
||||
},
|
||||
AggregateFunc::MaxTime => Signature {
|
||||
input: ConcreteDataType::time_second_datatype(),
|
||||
output: ConcreteDataType::time_second_datatype(),
|
||||
generic_fn: GenericFn::Max,
|
||||
},
|
||||
AggregateFunc::MaxDuration => Signature {
|
||||
input: ConcreteDataType::duration_second_datatype(),
|
||||
output: ConcreteDataType::duration_second_datatype(),
|
||||
generic_fn: GenericFn::Max,
|
||||
},
|
||||
AggregateFunc::MaxInterval => Signature {
|
||||
input: ConcreteDataType::interval_year_month_datatype(),
|
||||
output: ConcreteDataType::interval_year_month_datatype(),
|
||||
generic_fn: GenericFn::Max,
|
||||
},
|
||||
AggregateFunc::MinInt16 => Signature {
|
||||
input: ConcreteDataType::int16_datatype(),
|
||||
output: ConcreteDataType::int16_datatype(),
|
||||
generic_fn: GenericFn::Min,
|
||||
},
|
||||
AggregateFunc::MinInt32 => Signature {
|
||||
input: ConcreteDataType::int32_datatype(),
|
||||
output: ConcreteDataType::int32_datatype(),
|
||||
generic_fn: GenericFn::Min,
|
||||
},
|
||||
AggregateFunc::MinInt64 => Signature {
|
||||
input: ConcreteDataType::int64_datatype(),
|
||||
output: ConcreteDataType::int64_datatype(),
|
||||
generic_fn: GenericFn::Min,
|
||||
},
|
||||
AggregateFunc::MinUInt16 => Signature {
|
||||
input: ConcreteDataType::uint16_datatype(),
|
||||
output: ConcreteDataType::uint16_datatype(),
|
||||
generic_fn: GenericFn::Min,
|
||||
},
|
||||
AggregateFunc::MinUInt32 => Signature {
|
||||
input: ConcreteDataType::uint32_datatype(),
|
||||
output: ConcreteDataType::uint32_datatype(),
|
||||
generic_fn: GenericFn::Min,
|
||||
},
|
||||
AggregateFunc::MinUInt64 => Signature {
|
||||
input: ConcreteDataType::uint64_datatype(),
|
||||
output: ConcreteDataType::uint64_datatype(),
|
||||
generic_fn: GenericFn::Min,
|
||||
},
|
||||
AggregateFunc::MinFloat32 => Signature {
|
||||
input: ConcreteDataType::float32_datatype(),
|
||||
output: ConcreteDataType::float32_datatype(),
|
||||
generic_fn: GenericFn::Min,
|
||||
},
|
||||
AggregateFunc::MinFloat64 => Signature {
|
||||
input: ConcreteDataType::float64_datatype(),
|
||||
output: ConcreteDataType::float64_datatype(),
|
||||
generic_fn: GenericFn::Min,
|
||||
},
|
||||
AggregateFunc::MinBool => Signature {
|
||||
input: ConcreteDataType::boolean_datatype(),
|
||||
output: ConcreteDataType::boolean_datatype(),
|
||||
generic_fn: GenericFn::Min,
|
||||
},
|
||||
AggregateFunc::MinString => Signature {
|
||||
input: ConcreteDataType::string_datatype(),
|
||||
output: ConcreteDataType::string_datatype(),
|
||||
generic_fn: GenericFn::Min,
|
||||
},
|
||||
AggregateFunc::MinDate => Signature {
|
||||
input: ConcreteDataType::date_datatype(),
|
||||
output: ConcreteDataType::date_datatype(),
|
||||
generic_fn: GenericFn::Min,
|
||||
},
|
||||
AggregateFunc::MinDateTime => Signature {
|
||||
input: ConcreteDataType::datetime_datatype(),
|
||||
output: ConcreteDataType::datetime_datatype(),
|
||||
generic_fn: GenericFn::Min,
|
||||
},
|
||||
AggregateFunc::MinTimestamp => Signature {
|
||||
input: ConcreteDataType::timestamp_second_datatype(),
|
||||
output: ConcreteDataType::timestamp_second_datatype(),
|
||||
generic_fn: GenericFn::Min,
|
||||
},
|
||||
AggregateFunc::MinTime => Signature {
|
||||
input: ConcreteDataType::time_second_datatype(),
|
||||
output: ConcreteDataType::time_second_datatype(),
|
||||
generic_fn: GenericFn::Min,
|
||||
},
|
||||
AggregateFunc::MinDuration => Signature {
|
||||
input: ConcreteDataType::duration_second_datatype(),
|
||||
output: ConcreteDataType::duration_second_datatype(),
|
||||
generic_fn: GenericFn::Min,
|
||||
},
|
||||
AggregateFunc::MinInterval => Signature {
|
||||
input: ConcreteDataType::interval_year_month_datatype(),
|
||||
output: ConcreteDataType::interval_year_month_datatype(),
|
||||
generic_fn: GenericFn::Min,
|
||||
},
|
||||
AggregateFunc::SumInt16 => Signature {
|
||||
input: ConcreteDataType::int16_datatype(),
|
||||
output: ConcreteDataType::int16_datatype(),
|
||||
generic_fn: GenericFn::Sum,
|
||||
},
|
||||
AggregateFunc::SumInt32 => Signature {
|
||||
input: ConcreteDataType::int32_datatype(),
|
||||
output: ConcreteDataType::int32_datatype(),
|
||||
generic_fn: GenericFn::Sum,
|
||||
},
|
||||
AggregateFunc::SumInt64 => Signature {
|
||||
input: ConcreteDataType::int64_datatype(),
|
||||
output: ConcreteDataType::int64_datatype(),
|
||||
generic_fn: GenericFn::Sum,
|
||||
},
|
||||
AggregateFunc::SumUInt16 => Signature {
|
||||
input: ConcreteDataType::uint16_datatype(),
|
||||
output: ConcreteDataType::uint16_datatype(),
|
||||
generic_fn: GenericFn::Sum,
|
||||
},
|
||||
AggregateFunc::SumUInt32 => Signature {
|
||||
input: ConcreteDataType::uint32_datatype(),
|
||||
output: ConcreteDataType::uint32_datatype(),
|
||||
generic_fn: GenericFn::Sum,
|
||||
},
|
||||
AggregateFunc::SumUInt64 => Signature {
|
||||
input: ConcreteDataType::uint64_datatype(),
|
||||
output: ConcreteDataType::uint64_datatype(),
|
||||
generic_fn: GenericFn::Sum,
|
||||
},
|
||||
AggregateFunc::SumFloat32 => Signature {
|
||||
input: ConcreteDataType::float32_datatype(),
|
||||
output: ConcreteDataType::float32_datatype(),
|
||||
generic_fn: GenericFn::Sum,
|
||||
},
|
||||
AggregateFunc::SumFloat64 => Signature {
|
||||
input: ConcreteDataType::float64_datatype(),
|
||||
output: ConcreteDataType::float64_datatype(),
|
||||
generic_fn: GenericFn::Sum,
|
||||
},
|
||||
AggregateFunc::Count => Signature {
|
||||
input: ConcreteDataType::null_datatype(),
|
||||
output: ConcreteDataType::int64_datatype(),
|
||||
generic_fn: GenericFn::Count,
|
||||
},
|
||||
AggregateFunc::Any => Signature {
|
||||
input: ConcreteDataType::boolean_datatype(),
|
||||
output: ConcreteDataType::boolean_datatype(),
|
||||
generic_fn: GenericFn::Any,
|
||||
},
|
||||
AggregateFunc::All => Signature {
|
||||
input: ConcreteDataType::boolean_datatype(),
|
||||
output: ConcreteDataType::boolean_datatype(),
|
||||
generic_fn: GenericFn::All,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
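A small check of the documented exception for count: its declared input type is null even though its update path accepts any value type.

#[test]
fn count_signature_sketch() {
    let sig = AggregateFunc::Count.signature();
    // Declared as `null -> i64`; in practice any input type is accepted for counting.
    assert_eq!(sig.input, ConcreteDataType::null_datatype());
    assert_eq!(sig.output, ConcreteDataType::int64_datatype());
    assert_eq!(sig.generic_fn, GenericFn::Count);
}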
|
||||
|
||||
@@ -17,6 +17,7 @@ use std::collections::{BTreeMap, BTreeSet};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::value::Value;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::expr::error::{
|
||||
EvalError, InvalidArgumentSnafu, OptimizeSnafu, UnsupportedTemporalFilterSnafu,
|
||||
@@ -82,7 +83,7 @@ impl ScalarExpr {
|
||||
match self {
|
||||
ScalarExpr::Column(index) => Ok(values[*index].clone()),
|
||||
ScalarExpr::Literal(row_res, _ty) => Ok(row_res.clone()),
|
||||
ScalarExpr::CallUnmaterializable(f) => OptimizeSnafu {
|
||||
ScalarExpr::CallUnmaterializable(_) => OptimizeSnafu {
|
||||
reason: "Can't eval unmaterializable function".to_string(),
|
||||
}
|
||||
.fail(),
|
||||
@@ -105,12 +106,27 @@ impl ScalarExpr {
|
||||
/// This method is applicable even when `permutation` is not a
|
||||
/// strict permutation, and it only needs to have entries for
|
||||
/// each column referenced in `self`.
|
||||
pub fn permute(&mut self, permutation: &[usize]) {
|
||||
pub fn permute(&mut self, permutation: &[usize]) -> Result<(), EvalError> {
|
||||
// check first so that we don't end up with a partially permuted expression
|
||||
ensure!(
|
||||
self.get_all_ref_columns()
|
||||
.into_iter()
|
||||
.all(|i| i < permutation.len()),
|
||||
InvalidArgumentSnafu {
|
||||
reason: format!(
|
||||
"permutation {:?} is not a valid permutation for expression {:?}",
|
||||
permutation, self
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
self.visit_mut_post_nolimit(&mut |e| {
|
||||
if let ScalarExpr::Column(old_i) = e {
|
||||
*old_i = permutation[*old_i];
|
||||
}
|
||||
});
|
||||
Ok(())
|
||||
})?;
|
||||
Ok(())
|
||||
}
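A sketch of the new fallible `permute`: column references are rewritten through the slice, and an out-of-range reference is reported as an error instead of producing a half-permuted expression (compare `test_bad_permute` later in this diff).

#[test]
fn permute_sketch() -> Result<(), EvalError> {
    use crate::expr::func::BinaryFunc;

    // col(0) < col(2), then remap column 0 -> 2 and column 2 -> 0.
    let mut expr = ScalarExpr::Column(0).call_binary(ScalarExpr::Column(2), BinaryFunc::Lt);
    expr.permute(&[2, 1, 0])?;
    assert_eq!(
        expr,
        ScalarExpr::Column(2).call_binary(ScalarExpr::Column(0), BinaryFunc::Lt)
    );

    // A column reference outside the permutation is rejected up front.
    let mut bad = ScalarExpr::Column(4);
    assert!(bad.permute(&[0, 1, 2]).is_err());
    Ok(())
}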
|
||||
|
||||
/// Rewrites column indices with their value in `permutation`.
|
||||
@@ -118,12 +134,25 @@ impl ScalarExpr {
|
||||
/// This method is applicable even when `permutation` is not a
|
||||
/// strict permutation, and it only needs to have entries for
|
||||
/// each column referenced in `self`.
|
||||
pub fn permute_map(&mut self, permutation: &BTreeMap<usize, usize>) {
|
||||
pub fn permute_map(&mut self, permutation: &BTreeMap<usize, usize>) -> Result<(), EvalError> {
|
||||
// check first so that we don't end up with a partially permuted expression
|
||||
ensure!(
|
||||
self.get_all_ref_columns()
|
||||
.is_subset(&permutation.keys().cloned().collect()),
|
||||
InvalidArgumentSnafu {
|
||||
reason: format!(
|
||||
"permutation {:?} is not a valid permutation for expression {:?}",
|
||||
permutation, self
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
self.visit_mut_post_nolimit(&mut |e| {
|
||||
if let ScalarExpr::Column(old_i) = e {
|
||||
*old_i = permutation[old_i];
|
||||
}
|
||||
});
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the set of columns that are referenced by `self`.
|
||||
@@ -133,7 +162,9 @@ impl ScalarExpr {
|
||||
if let ScalarExpr::Column(i) = e {
|
||||
support.insert(*i);
|
||||
}
|
||||
});
|
||||
Ok(())
|
||||
})
|
||||
.unwrap();
|
||||
support
|
||||
}
|
||||
|
||||
@@ -180,70 +211,72 @@ impl ScalarExpr {
|
||||
|
||||
impl ScalarExpr {
|
||||
/// visit in post-order without a recursion-depth limit, which may cause stack overflow for deep expressions
|
||||
fn visit_post_nolimit<F>(&self, f: &mut F)
|
||||
fn visit_post_nolimit<F>(&self, f: &mut F) -> Result<(), EvalError>
|
||||
where
|
||||
F: FnMut(&Self),
|
||||
F: FnMut(&Self) -> Result<(), EvalError>,
|
||||
{
|
||||
self.visit_children(|e| e.visit_post_nolimit(f));
|
||||
f(self);
|
||||
self.visit_children(|e| e.visit_post_nolimit(f))?;
|
||||
f(self)
|
||||
}
|
||||
|
||||
fn visit_children<F>(&self, mut f: F)
|
||||
fn visit_children<F>(&self, mut f: F) -> Result<(), EvalError>
|
||||
where
|
||||
F: FnMut(&Self),
|
||||
F: FnMut(&Self) -> Result<(), EvalError>,
|
||||
{
|
||||
match self {
|
||||
ScalarExpr::Column(_)
|
||||
| ScalarExpr::Literal(_, _)
|
||||
| ScalarExpr::CallUnmaterializable(_) => (),
|
||||
| ScalarExpr::CallUnmaterializable(_) => Ok(()),
|
||||
ScalarExpr::CallUnary { expr, .. } => f(expr),
|
||||
ScalarExpr::CallBinary { expr1, expr2, .. } => {
|
||||
f(expr1);
|
||||
f(expr2);
|
||||
f(expr1)?;
|
||||
f(expr2)
|
||||
}
|
||||
ScalarExpr::CallVariadic { exprs, .. } => {
|
||||
for expr in exprs {
|
||||
f(expr);
|
||||
f(expr)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
ScalarExpr::If { cond, then, els } => {
|
||||
f(cond);
|
||||
f(then);
|
||||
f(els);
|
||||
f(cond)?;
|
||||
f(then)?;
|
||||
f(els)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn visit_mut_post_nolimit<F>(&mut self, f: &mut F)
|
||||
fn visit_mut_post_nolimit<F>(&mut self, f: &mut F) -> Result<(), EvalError>
|
||||
where
|
||||
F: FnMut(&mut Self),
|
||||
F: FnMut(&mut Self) -> Result<(), EvalError>,
|
||||
{
|
||||
self.visit_mut_children(|e: &mut Self| e.visit_mut_post_nolimit(f));
|
||||
f(self);
|
||||
self.visit_mut_children(|e: &mut Self| e.visit_mut_post_nolimit(f))?;
|
||||
f(self)
|
||||
}
|
||||
|
||||
fn visit_mut_children<F>(&mut self, mut f: F)
|
||||
fn visit_mut_children<F>(&mut self, mut f: F) -> Result<(), EvalError>
|
||||
where
|
||||
F: FnMut(&mut Self),
|
||||
F: FnMut(&mut Self) -> Result<(), EvalError>,
|
||||
{
|
||||
match self {
|
||||
ScalarExpr::Column(_)
|
||||
| ScalarExpr::Literal(_, _)
|
||||
| ScalarExpr::CallUnmaterializable(_) => (),
|
||||
| ScalarExpr::CallUnmaterializable(_) => Ok(()),
|
||||
ScalarExpr::CallUnary { expr, .. } => f(expr),
|
||||
ScalarExpr::CallBinary { expr1, expr2, .. } => {
|
||||
f(expr1);
|
||||
f(expr2);
|
||||
f(expr1)?;
|
||||
f(expr2)
|
||||
}
|
||||
ScalarExpr::CallVariadic { exprs, .. } => {
|
||||
for expr in exprs {
|
||||
f(expr);
|
||||
f(expr)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
ScalarExpr::If { cond, then, els } => {
|
||||
f(cond);
|
||||
f(then);
|
||||
f(els);
|
||||
f(cond)?;
|
||||
f(then)?;
|
||||
f(els)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -257,7 +290,9 @@ impl ScalarExpr {
|
||||
if let ScalarExpr::CallUnmaterializable(UnmaterializableFunc::Now) = e {
|
||||
contains = true;
|
||||
}
|
||||
});
|
||||
Ok(())
|
||||
})
|
||||
.unwrap();
|
||||
contains
|
||||
}
|
||||
|
||||
@@ -317,6 +352,8 @@ impl ScalarExpr {
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use datatypes::arrow::array::Scalar;
|
||||
|
||||
use super::*;
|
||||
#[test]
|
||||
fn test_extract_bound() {
|
||||
@@ -390,9 +427,21 @@ mod test {
|
||||
// EvalError is not Eq, so we need to compare the error message
|
||||
match (actual, expected) {
|
||||
(Ok(l), Ok(r)) => assert_eq!(l, r),
|
||||
(Err(l), Err(r)) => assert!(matches!(l, r)),
|
||||
(l, r) => panic!("expected: {:?}, actual: {:?}", r, l),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bad_permute() {
|
||||
let mut expr = ScalarExpr::Column(4);
|
||||
let permutation = vec![1, 2, 3];
|
||||
let res = expr.permute(&permutation);
|
||||
assert!(matches!(res, Err(EvalError::InvalidArgument { .. })));
|
||||
|
||||
let mut expr = ScalarExpr::Column(0);
|
||||
let permute_map = BTreeMap::from([(1, 2), (3, 4)]);
|
||||
let res = expr.permute_map(&permute_map);
|
||||
assert!(matches!(res, Err(EvalError::InvalidArgument { .. })));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,8 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![allow(unused)]
|
||||
#![allow(dead_code)]
|
||||
#![allow(unused_imports)]
|
||||
// allow unused for now because it should be used later
|
||||
mod adapter;
|
||||
mod expr;
|
||||
mod plan;
|
||||
mod repr;
|
||||
|
||||
src/flow/src/plan.rs (new file, 98 lines)
@@ -0,0 +1,98 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! This module contains basic definitions for a dataflow plan
|
||||
//! that can be translated to a hydro dataflow.
|
||||
|
||||
mod join;
|
||||
mod reduce;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
pub(crate) use self::reduce::{AccumulablePlan, KeyValPlan, ReducePlan};
|
||||
use crate::expr::{
|
||||
AggregateExpr, EvalError, Id, LocalId, MapFilterProject, SafeMfpPlan, ScalarExpr,
|
||||
};
|
||||
use crate::plan::join::JoinPlan;
|
||||
use crate::repr::{DiffRow, RelationType};
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Deserialize, Serialize)]
|
||||
pub struct TypedPlan {
|
||||
/// output type of the relation
|
||||
pub typ: RelationType,
|
||||
pub plan: Plan,
|
||||
}
|
||||
|
||||
/// TODO(discord9): support `TableFunc`(by define FlatMap that map 1 to n)
|
||||
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Deserialize, Serialize)]
|
||||
pub enum Plan {
|
||||
/// A constant collection of rows.
|
||||
Constant { rows: Vec<DiffRow> },
|
||||
/// Get CDC data from a source, be it an external reference to an existing source or an internal
|
||||
/// reference to a `Let` identifier
|
||||
Get { id: Id },
|
||||
/// Create a temporary collection from the given `value`, and make this binding only available
|
||||
/// in scope of `body`
|
||||
Let {
|
||||
id: LocalId,
|
||||
value: Box<Plan>,
|
||||
body: Box<Plan>,
|
||||
},
|
||||
/// Map, Filter, and Project operators.
|
||||
Mfp {
|
||||
/// The input collection.
|
||||
input: Box<Plan>,
|
||||
/// Linear operator to apply to each record.
|
||||
mfp: MapFilterProject,
|
||||
},
|
||||
/// Reduce operator, aggregation by key assembled from KeyValPlan
|
||||
Reduce {
|
||||
/// The input collection.
|
||||
input: Box<Plan>,
|
||||
/// A plan for changing input records into key, value pairs.
|
||||
key_val_plan: KeyValPlan,
|
||||
/// A plan for performing the reduce.
|
||||
///
|
||||
/// The implementation of reduction has several different strategies based
|
||||
/// on the properties of the reduction, and the input itself.
|
||||
reduce_plan: ReducePlan,
|
||||
},
|
||||
/// A multiway relational equijoin, with fused map, filter, and projection.
|
||||
///
|
||||
/// This stage performs a multiway join among `inputs`, using the equality
|
||||
/// constraints expressed in `plan`. The plan also describes the implementation
|
||||
/// strategy we will use, and any pushed down per-record work.
|
||||
Join {
|
||||
/// An ordered list of inputs that will be joined.
|
||||
inputs: Vec<Plan>,
|
||||
/// Detailed information about the implementation of the join.
|
||||
///
|
||||
/// This includes information about the implementation strategy, but also
|
||||
/// any map, filter, project work that we might follow the join with, but
|
||||
/// potentially pushed down into the implementation of the join.
|
||||
plan: JoinPlan,
|
||||
},
|
||||
/// Adds the contents of the input collections.
|
||||
///
|
||||
/// Importantly, this is *multiset* union, so the multiplicities of records will
|
||||
/// add. This is in contrast to *set* union, where the multiplicities would be
|
||||
/// capped at one. A set union can be formed with `Union` followed by `Reduce`
|
||||
/// implementing the "distinct" operator.
|
||||
Union {
|
||||
/// The input collections
|
||||
inputs: Vec<Plan>,
|
||||
/// Whether to consolidate the output, e.g., cancel negated records.
|
||||
consolidate_output: bool,
|
||||
},
|
||||
}
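A sketch of how these variants compose: a one-row constant collection fed through an identity Mfp node. It assumes `Row` and `Value` are in scope and that `DiffRow` is the `(Row, timestamp, diff)` triple used elsewhere in this crate.

fn tiny_plan_sketch() -> Plan {
    let row = Row::new(vec![Value::from(1i64)]);
    Plan::Mfp {
        input: Box::new(Plan::Constant {
            // assumed DiffRow shape: (row, timestamp, diff)
            rows: vec![(row, 0, 1)],
        }),
        // one input column, no maps or filters, so this Mfp is the identity
        mfp: MapFilterProject::new(1),
    }
}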
|
||||
Some files were not shown because too many files have changed in this diff.