Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-04 20:32:56 +00:00

Compare commits: discord9-p...jkt (36 commits)
| Author | SHA1 | Date |
|---|---|---|
| | edd8cb6710 | |
| | 7ee61e5d28 | |
| | 1b30aca5a5 | |
| | 99b352cea1 | |
| | 0f521956bf | |
| | aee72ab363 | |
| | 5b78d76fc5 | |
| | a166430650 | |
| | 007a2b3dfe | |
| | f35e957ddd | |
| | 68414bf593 | |
| | 5e836a0d1b | |
| | f5e0da2fc8 | |
| | fb96d26ebf | |
| | 0046d3f65b | |
| | d7b97fc877 | |
| | bfdaa28b25 | |
| | 6293bb1f5b | |
| | 8fa1ebcc3e | |
| | c18c3f5839 | |
| | 629e72d8c0 | |
| | e4065505ab | |
| | aafd164483 | |
| | 1386e903d6 | |
| | 12692a940c | |
| | 4d44cbb8b2 | |
| | f4911aa3bb | |
| | 5ac61f17bc | |
| | e0d34c6d95 | |
| | 8a98b9c433 | |
| | 1f5d36a203 | |
| | 6fc7168893 | |
| | 2799d67212 | |
| | d97a76c312 | |
| | 15caca244e | |
| | 8638075cdd | |
.github/scripts/update-dev-builder-version.sh (vendored): 37 lines changed
@@ -1,37 +0,0 @@
#!/bin/bash

DEV_BUILDER_IMAGE_TAG=$1

update_dev_builder_version() {
  if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
    echo "Error: Should specify the dev-builder image tag"
    exit 1
  fi

  # Configure Git configs.
  git config --global user.email greptimedb-ci@greptime.com
  git config --global user.name greptimedb-ci

  # Checkout a new branch.
  BRANCH_NAME="ci/update-dev-builder-$(date +%Y%m%d%H%M%S)"
  git checkout -b $BRANCH_NAME

  # Update the dev-builder image tag in the Makefile.
  gsed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile

  # Commit the changes.
  git add Makefile
  git commit -m "ci: update dev-builder image tag"
  git push origin $BRANCH_NAME

  # Create a Pull Request.
  gh pr create \
    --title "ci: update dev-builder image tag" \
    --body "This PR updates the dev-builder image tag" \
    --base main \
    --head $BRANCH_NAME \
    --reviewer zyy17 \
    --reviewer daviderli614
}

update_dev_builder_version
@@ -24,19 +24,11 @@ on:
|
||||
description: Release dev-builder-android image
|
||||
required: false
|
||||
default: false
|
||||
update_dev_builder_image_tag:
|
||||
type: boolean
|
||||
description: Update the DEV_BUILDER_IMAGE_TAG in Makefile and create a PR
|
||||
required: false
|
||||
default: false
|
||||
|
||||
jobs:
|
||||
release-dev-builder-images:
|
||||
name: Release dev builder images
|
||||
# The jobs are triggered by the following events:
|
||||
# 1. Manually triggered workflow_dispatch event
|
||||
# 2. Push event when the PR that modifies the `rust-toolchain.toml` or `docker/dev-builder/**` is merged to main
|
||||
if: ${{ github.event_name == 'push' || inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }}
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
version: ${{ steps.set-version.outputs.version }}
|
||||
@@ -65,9 +57,9 @@ jobs:
|
||||
version: ${{ env.VERSION }}
|
||||
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
|
||||
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
|
||||
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
|
||||
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
|
||||
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
|
||||
|
||||
release-dev-builder-images-ecr:
|
||||
name: Release dev builder images to AWS ECR
|
||||
@@ -93,7 +85,7 @@ jobs:
|
||||
|
||||
- name: Push dev-builder-ubuntu image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
@@ -114,7 +106,7 @@ jobs:
|
||||
|
||||
- name: Push dev-builder-centos image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
|
||||
if: ${{ inputs.release_dev_builder_centos_image }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
@@ -135,7 +127,7 @@ jobs:
|
||||
|
||||
- name: Push dev-builder-android image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
|
||||
if: ${{ inputs.release_dev_builder_android_image }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
@@ -170,7 +162,7 @@ jobs:
|
||||
|
||||
- name: Push dev-builder-ubuntu image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
@@ -184,7 +176,7 @@ jobs:
|
||||
|
||||
- name: Push dev-builder-centos image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
|
||||
if: ${{ inputs.release_dev_builder_centos_image }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
@@ -198,7 +190,7 @@ jobs:
|
||||
|
||||
- name: Push dev-builder-android image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
|
||||
if: ${{ inputs.release_dev_builder_android_image }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
@@ -209,24 +201,3 @@ jobs:
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
|
||||
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
|
||||
|
||||
update-dev-builder-image-tag:
|
||||
name: Update dev-builder image tag
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
if: ${{ github.event_name == 'push' || inputs.update_dev_builder_image_tag }}
|
||||
needs: [
|
||||
release-dev-builder-images
|
||||
]
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Update dev-builder image tag
|
||||
shell: bash
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
./.github/scripts/update-dev-builder-version.sh ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
|
||||
Makefile: 2 lines changed
@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2025-04-15-1a517ec8-20250428023155
DEV_BUILDER_IMAGE_TAG ?= 2024-12-25-a71b93dd-20250305072908
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu
flake.lock (generated): 18 lines changed
@@ -8,11 +8,11 @@
      "rust-analyzer-src": "rust-analyzer-src"
    },
    "locked": {
      "lastModified": 1745735608,
      "narHash": "sha256-L0jzm815XBFfF2wCFmR+M1CF+beIEFj6SxlqVKF59Ec=",
      "lastModified": 1742452566,
      "narHash": "sha256-sVuLDQ2UIWfXUBbctzrZrXM2X05YjX08K7XHMztt36E=",
      "owner": "nix-community",
      "repo": "fenix",
      "rev": "c39a78eba6ed2a022cc3218db90d485077101496",
      "rev": "7d9ba794daf5e8cc7ee728859bc688d8e26d5f06",
      "type": "github"
    },
    "original": {
@@ -41,11 +41,11 @@
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1745487689,
        "narHash": "sha256-FQoi3R0NjQeBAsEOo49b5tbDPcJSMWc3QhhaIi9eddw=",
        "lastModified": 1743576891,
        "narHash": "sha256-vXiKURtntURybE6FMNFAVpRPr8+e8KoLPrYs9TGuAKc=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "5630cf13cceac06cefe9fc607e8dfa8fb342dde3",
        "rev": "44a69ed688786e98a101f02b712c313f1ade37ab",
        "type": "github"
      },
      "original": {
@@ -65,11 +65,11 @@
    "rust-analyzer-src": {
      "flake": false,
      "locked": {
        "lastModified": 1745694049,
        "narHash": "sha256-fxvRYH/tS7hGQeg9zCVh5RBcSWT+JGJet7RA8Ss+rC0=",
        "lastModified": 1742296961,
        "narHash": "sha256-gCpvEQOrugHWLimD1wTFOJHagnSEP6VYBDspq96Idu0=",
        "owner": "rust-lang",
        "repo": "rust-analyzer",
        "rev": "d8887c0758bbd2d5f752d5bd405d4491e90e7ed6",
        "rev": "15d87419f1a123d8f888d608129c3ce3ff8f13d4",
        "type": "github"
      },
      "original": {
@@ -21,7 +21,7 @@
      lib = nixpkgs.lib;
      rustToolchain = fenix.packages.${system}.fromToolchainName {
        name = (lib.importTOML ./rust-toolchain.toml).toolchain.channel;
        sha256 = "sha256-arzEYlWLGGYeOhECHpBxQd2joZ4rPKV3qLNnZ+eql6A=";
        sha256 = "sha256-i0Sh/ZFFsHlZ3oFZFc24qdk6Cd8Do8OPU4HJQsrKOeM=";
      };
    in
    {
@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2024-12-25"
channel = "nightly-2025-04-15"
@@ -36,8 +36,8 @@ use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_query::Output;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::RecordBatchStreamWrapper;
use common_telemetry::error;
use common_telemetry::tracing_context::W3cTrace;
use common_telemetry::{error, warn};
use futures::future;
use futures_util::{Stream, StreamExt, TryStreamExt};
use prost::Message;
@@ -192,36 +192,6 @@ impl Database {
        from_grpc_response(response)
    }

    /// Retry if connection fails, max_retries is the max number of retries, so the total wait time
    /// is `max_retries * GRPC_CONN_TIMEOUT`
    pub async fn handle_with_retry(&self, request: Request, max_retries: u32) -> Result<u32> {
        let mut client = make_database_client(&self.client)?.inner;
        let mut retries = 0;
        let request = self.to_rpc_request(request);
        loop {
            let raw_response = client.handle(request.clone()).await;
            match (raw_response, retries < max_retries) {
                (Ok(resp), _) => return from_grpc_response(resp.into_inner()),
                (Err(err), true) => {
                    // determine if the error is retryable
                    if is_grpc_retryable(&err) {
                        // retry
                        retries += 1;
                        warn!("Retrying {} times with error = {:?}", retries, err);
                        continue;
                    }
                }
                (Err(err), false) => {
                    error!(
                        "Failed to send request to grpc handle after {} retries, error = {:?}",
                        retries, err
                    );
                    return Err(err.into());
                }
            }
        }
    }

    #[inline]
    fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
        GreptimeRequest {
@@ -398,11 +368,6 @@ impl Database {
    }
}

/// by grpc standard, only `Unavailable` is retryable, see: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md#status-codes-and-their-use-in-grpc
pub fn is_grpc_retryable(err: &tonic::Status) -> bool {
    matches!(err.code(), tonic::Code::Unavailable)
}

#[derive(Default, Debug, Clone)]
struct FlightContext {
    auth_header: Option<AuthHeader>,
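For orientation only (not part of this diff): a minimal, self-contained Rust sketch of the bounded retry-on-`Unavailable` pattern that the `handle_with_retry`/`is_grpc_retryable` pair in the hunk above implements. Only `tonic`'s `Status` and `Code` types are assumed; the generic `call_with_retry` helper is a hypothetical stand-in, not GreptimeDB API.

use tonic::{Code, Status};

// Same rule as `is_grpc_retryable` above: per the gRPC status-code guidance,
// only `Unavailable` is treated as retryable.
fn is_retryable(err: &Status) -> bool {
    matches!(err.code(), Code::Unavailable)
}

// Bounded retry loop: retry only retryable errors, give up after `max_retries`.
async fn call_with_retry<F, Fut, T>(mut call: F, max_retries: u32) -> Result<T, Status>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, Status>>,
{
    let mut retries = 0;
    loop {
        match call().await {
            Ok(resp) => return Ok(resp),
            // Retryable and budget left: count the attempt and loop again.
            Err(err) if retries < max_retries && is_retryable(&err) => retries += 1,
            // Non-retryable error or retry budget exhausted: surface the error.
            Err(err) => return Err(err),
        }
    }
}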
@@ -12,7 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::time::Duration;
|
||||
|
||||
use async_trait::async_trait;
|
||||
@@ -132,7 +131,7 @@ impl SubCommand {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Parser)]
|
||||
#[derive(Debug, Default, Parser)]
|
||||
pub struct StartCommand {
|
||||
/// The address to bind the gRPC server.
|
||||
#[clap(long, alias = "bind-addr")]
|
||||
@@ -172,27 +171,6 @@ pub struct StartCommand {
|
||||
backend: Option<BackendImpl>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for StartCommand {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("StartCommand")
|
||||
.field("rpc_bind_addr", &self.rpc_bind_addr)
|
||||
.field("rpc_server_addr", &self.rpc_server_addr)
|
||||
.field("store_addrs", &self.sanitize_store_addrs())
|
||||
.field("config_file", &self.config_file)
|
||||
.field("selector", &self.selector)
|
||||
.field("use_memory_store", &self.use_memory_store)
|
||||
.field("enable_region_failover", &self.enable_region_failover)
|
||||
.field("http_addr", &self.http_addr)
|
||||
.field("http_timeout", &self.http_timeout)
|
||||
.field("env_prefix", &self.env_prefix)
|
||||
.field("data_home", &self.data_home)
|
||||
.field("store_key_prefix", &self.store_key_prefix)
|
||||
.field("max_txn_ops", &self.max_txn_ops)
|
||||
.field("backend", &self.backend)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
|
||||
let mut opts = MetasrvOptions::load_layered_options(
|
||||
@@ -206,15 +184,6 @@ impl StartCommand {
|
||||
Ok(opts)
|
||||
}
|
||||
|
||||
fn sanitize_store_addrs(&self) -> Option<Vec<String>> {
|
||||
self.store_addrs.as_ref().map(|addrs| {
|
||||
addrs
|
||||
.iter()
|
||||
.map(|addr| common_meta::kv_backend::util::sanitize_connection_string(addr))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
// The precedence order is: cli > config file > environment variables > default values.
|
||||
fn merge_with_cli_options(
|
||||
&self,
|
||||
|
||||
@@ -19,4 +19,4 @@ mod uddsketch_state;
|
||||
pub use geo_path::{GeoPathAccumulator, GEO_PATH_NAME};
|
||||
pub(crate) use hll::HllStateType;
|
||||
pub use hll::{HllState, HLL_MERGE_NAME, HLL_NAME};
|
||||
pub use uddsketch_state::{UddSketchState, UDDSKETCH_MERGE_NAME, UDDSKETCH_STATE_NAME};
|
||||
pub use uddsketch_state::{UddSketchState, UDDSKETCH_STATE_NAME};
|
||||
|
||||
@@ -31,28 +31,23 @@ use datafusion::physical_plan::expressions::Literal;
|
||||
use datafusion::prelude::create_udaf;
|
||||
use datatypes::arrow::array::ArrayRef;
|
||||
use datatypes::arrow::datatypes::{DataType, Float64Type};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use uddsketch::{SketchHashKey, UDDSketch};
|
||||
|
||||
pub const UDDSKETCH_STATE_NAME: &str = "uddsketch_state";
|
||||
|
||||
pub const UDDSKETCH_MERGE_NAME: &str = "uddsketch_merge";
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[derive(Debug)]
|
||||
pub struct UddSketchState {
|
||||
uddsketch: UDDSketch,
|
||||
error_rate: f64,
|
||||
}
|
||||
|
||||
impl UddSketchState {
|
||||
pub fn new(bucket_size: u64, error_rate: f64) -> Self {
|
||||
Self {
|
||||
uddsketch: UDDSketch::new(bucket_size, error_rate),
|
||||
error_rate,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn state_udf_impl() -> AggregateUDF {
|
||||
pub fn udf_impl() -> AggregateUDF {
|
||||
create_udaf(
|
||||
UDDSKETCH_STATE_NAME,
|
||||
vec![DataType::Int64, DataType::Float64, DataType::Float64],
|
||||
@@ -66,55 +61,18 @@ impl UddSketchState {
|
||||
)
|
||||
}
|
||||
|
||||
/// Create a UDF for the `uddsketch_merge` function.
|
||||
///
|
||||
/// `uddsketch_merge` accepts bucket size, error rate, and a binary column of states generated by `uddsketch_state`
|
||||
/// and merges them into a single state.
|
||||
///
|
||||
/// The bucket size and error rate must be the same as the original state.
|
||||
pub fn merge_udf_impl() -> AggregateUDF {
|
||||
create_udaf(
|
||||
UDDSKETCH_MERGE_NAME,
|
||||
vec![DataType::Int64, DataType::Float64, DataType::Binary],
|
||||
Arc::new(DataType::Binary),
|
||||
Volatility::Immutable,
|
||||
Arc::new(|args| {
|
||||
let (bucket_size, error_rate) = downcast_accumulator_args(args)?;
|
||||
Ok(Box::new(UddSketchState::new(bucket_size, error_rate)))
|
||||
}),
|
||||
Arc::new(vec![DataType::Binary]),
|
||||
)
|
||||
}
|
||||
|
||||
fn update(&mut self, value: f64) {
|
||||
self.uddsketch.add_value(value);
|
||||
}
|
||||
|
||||
fn merge(&mut self, raw: &[u8]) -> DfResult<()> {
|
||||
if let Ok(uddsketch) = bincode::deserialize::<Self>(raw) {
|
||||
if uddsketch.uddsketch.count() != 0 {
|
||||
if self.uddsketch.max_allowed_buckets() != uddsketch.uddsketch.max_allowed_buckets()
|
||||
|| (self.error_rate - uddsketch.error_rate).abs() >= 1e-9
|
||||
{
|
||||
return Err(DataFusionError::Plan(format!(
|
||||
"Merging UDDSketch with different parameters: arguments={:?} vs actual input={:?}",
|
||||
(
|
||||
self.uddsketch.max_allowed_buckets(),
|
||||
self.error_rate
|
||||
),
|
||||
(uddsketch.uddsketch.max_allowed_buckets(), uddsketch.error_rate)
|
||||
)));
|
||||
}
|
||||
self.uddsketch.merge_sketch(&uddsketch.uddsketch);
|
||||
fn merge(&mut self, raw: &[u8]) {
|
||||
if let Ok(uddsketch) = bincode::deserialize::<UDDSketch>(raw) {
|
||||
if uddsketch.count() != 0 {
|
||||
self.uddsketch.merge_sketch(&uddsketch);
|
||||
}
|
||||
} else {
|
||||
trace!("Warning: Failed to deserialize UDDSketch from {:?}", raw);
|
||||
return Err(DataFusionError::Plan(
|
||||
"Failed to deserialize UDDSketch from binary".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -155,21 +113,9 @@ fn downcast_accumulator_args(args: AccumulatorArgs) -> DfResult<(u64, f64)> {
|
||||
impl DfAccumulator for UddSketchState {
|
||||
fn update_batch(&mut self, values: &[ArrayRef]) -> DfResult<()> {
|
||||
let array = &values[2]; // the third column is data value
|
||||
match array.data_type() {
|
||||
DataType::Float64 => {
|
||||
let f64_array = as_primitive_array::<Float64Type>(array)?;
|
||||
for v in f64_array.iter().flatten() {
|
||||
self.update(v);
|
||||
}
|
||||
}
|
||||
// meaning instantiate as `uddsketch_merge`
|
||||
DataType::Binary => self.merge_batch(&[array.clone()])?,
|
||||
_ => {
|
||||
return not_impl_err!(
|
||||
"UDDSketch functions do not support data type: {}",
|
||||
array.data_type()
|
||||
)
|
||||
}
|
||||
let f64_array = as_primitive_array::<Float64Type>(array)?;
|
||||
for v in f64_array.iter().flatten() {
|
||||
self.update(v);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -177,7 +123,7 @@ impl DfAccumulator for UddSketchState {
|
||||
|
||||
fn evaluate(&mut self) -> DfResult<ScalarValue> {
|
||||
Ok(ScalarValue::Binary(Some(
|
||||
bincode::serialize(&self).map_err(|e| {
|
||||
bincode::serialize(&self.uddsketch).map_err(|e| {
|
||||
DataFusionError::Internal(format!("Failed to serialize UDDSketch: {}", e))
|
||||
})?,
|
||||
)))
|
||||
@@ -204,7 +150,7 @@ impl DfAccumulator for UddSketchState {
|
||||
|
||||
fn state(&mut self) -> DfResult<Vec<ScalarValue>> {
|
||||
Ok(vec![ScalarValue::Binary(Some(
|
||||
bincode::serialize(&self).map_err(|e| {
|
||||
bincode::serialize(&self.uddsketch).map_err(|e| {
|
||||
DataFusionError::Internal(format!("Failed to serialize UDDSketch: {}", e))
|
||||
})?,
|
||||
))])
|
||||
@@ -214,7 +160,7 @@ impl DfAccumulator for UddSketchState {
|
||||
let array = &states[0];
|
||||
let binary_array = as_binary_array(array)?;
|
||||
for v in binary_array.iter().flatten() {
|
||||
self.merge(v)?;
|
||||
self.merge(v);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -236,8 +182,8 @@ mod tests {
|
||||
|
||||
let result = state.evaluate().unwrap();
|
||||
if let ScalarValue::Binary(Some(bytes)) = result {
|
||||
let deserialized: UddSketchState = bincode::deserialize(&bytes).unwrap();
|
||||
assert_eq!(deserialized.uddsketch.count(), 3);
|
||||
let deserialized: UDDSketch = bincode::deserialize(&bytes).unwrap();
|
||||
assert_eq!(deserialized.count(), 3);
|
||||
} else {
|
||||
panic!("Expected binary scalar value");
|
||||
}
|
||||
@@ -255,15 +201,13 @@ mod tests {
|
||||
// Create new state and merge the serialized data
|
||||
let mut new_state = UddSketchState::new(10, 0.01);
|
||||
if let ScalarValue::Binary(Some(bytes)) = &serialized {
|
||||
new_state.merge(bytes).unwrap();
|
||||
new_state.merge(bytes);
|
||||
|
||||
// Verify the merged state matches original by comparing deserialized values
|
||||
let original_sketch: UddSketchState = bincode::deserialize(bytes).unwrap();
|
||||
let original_sketch = original_sketch.uddsketch;
|
||||
let original_sketch: UDDSketch = bincode::deserialize(bytes).unwrap();
|
||||
let new_result = new_state.evaluate().unwrap();
|
||||
if let ScalarValue::Binary(Some(new_bytes)) = new_result {
|
||||
let new_sketch: UddSketchState = bincode::deserialize(&new_bytes).unwrap();
|
||||
let new_sketch = new_sketch.uddsketch;
|
||||
let new_sketch: UDDSketch = bincode::deserialize(&new_bytes).unwrap();
|
||||
assert_eq!(original_sketch.count(), new_sketch.count());
|
||||
assert_eq!(original_sketch.sum(), new_sketch.sum());
|
||||
assert_eq!(original_sketch.mean(), new_sketch.mean());
|
||||
@@ -300,8 +244,7 @@ mod tests {
|
||||
|
||||
let result = state.evaluate().unwrap();
|
||||
if let ScalarValue::Binary(Some(bytes)) = result {
|
||||
let deserialized: UddSketchState = bincode::deserialize(&bytes).unwrap();
|
||||
let deserialized = deserialized.uddsketch;
|
||||
let deserialized: UDDSketch = bincode::deserialize(&bytes).unwrap();
|
||||
assert_eq!(deserialized.count(), 3);
|
||||
} else {
|
||||
panic!("Expected binary scalar value");
|
||||
@@ -330,8 +273,7 @@ mod tests {
|
||||
|
||||
let result = merged_state.evaluate().unwrap();
|
||||
if let ScalarValue::Binary(Some(bytes)) = result {
|
||||
let deserialized: UddSketchState = bincode::deserialize(&bytes).unwrap();
|
||||
let deserialized = deserialized.uddsketch;
|
||||
let deserialized: UDDSketch = bincode::deserialize(&bytes).unwrap();
|
||||
assert_eq!(deserialized.count(), 2);
|
||||
} else {
|
||||
panic!("Expected binary scalar value");
|
||||
|
||||
@@ -37,7 +37,7 @@ impl fmt::Display for RateFunction {

impl Function for RateFunction {
    fn name(&self) -> &str {
        "prom_rate"
        "rate"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
@@ -82,7 +82,7 @@ mod tests {
    #[test]
    fn test_rate_function() {
        let rate = RateFunction;
        assert_eq!("prom_rate", rate.name());
        assert_eq!("rate", rate.name());
        assert_eq!(
            ConcreteDataType::float64_datatype(),
            rate.return_type(&[]).unwrap()
@@ -13,8 +13,10 @@
// limitations under the License.

use std::sync::Arc;
mod greatest;
mod to_unixtime;

use greatest::GreatestFunction;
use to_unixtime::ToUnixtimeFunction;

use crate::function_registry::FunctionRegistry;
@@ -24,5 +26,6 @@ pub(crate) struct TimestampFunction;
impl TimestampFunction {
    pub fn register(registry: &FunctionRegistry) {
        registry.register(Arc::new(ToUnixtimeFunction));
        registry.register(Arc::new(GreatestFunction));
    }
}
src/common/function/src/scalars/timestamp/greatest.rs (new file): 328 lines
@@ -0,0 +1,328 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::{self};
|
||||
|
||||
use common_query::error::{
|
||||
self, ArrowComputeSnafu, InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use datafusion::arrow::compute::kernels::cmp::gt;
|
||||
use datatypes::arrow::array::AsArray;
|
||||
use datatypes::arrow::compute::cast;
|
||||
use datatypes::arrow::compute::kernels::zip;
|
||||
use datatypes::arrow::datatypes::{
|
||||
DataType as ArrowDataType, Date32Type, TimeUnit, TimestampMicrosecondType,
|
||||
TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType,
|
||||
};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::types::TimestampType;
|
||||
use datatypes::vectors::{Helper, VectorRef};
|
||||
use snafu::{ensure, ResultExt};
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct GreatestFunction;
|
||||
|
||||
const NAME: &str = "greatest";
|
||||
|
||||
macro_rules! gt_time_types {
|
||||
($ty: ident, $columns:expr) => {{
|
||||
let column1 = $columns[0].to_arrow_array();
|
||||
let column2 = $columns[1].to_arrow_array();
|
||||
|
||||
let column1 = column1.as_primitive::<$ty>();
|
||||
let column2 = column2.as_primitive::<$ty>();
|
||||
let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
|
||||
|
||||
let result = zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
|
||||
Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)
|
||||
}};
|
||||
}
|
||||
|
||||
impl Function for GreatestFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
ensure!(
|
||||
input_types.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly two, have: {}",
|
||||
input_types.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
|
||||
match &input_types[0] {
|
||||
ConcreteDataType::String(_) => Ok(ConcreteDataType::timestamp_millisecond_datatype()),
|
||||
ConcreteDataType::Date(_) => Ok(ConcreteDataType::date_datatype()),
|
||||
ConcreteDataType::Timestamp(ts_type) => Ok(ConcreteDataType::Timestamp(*ts_type)),
|
||||
_ => UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: input_types,
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::uniform(
|
||||
2,
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::date_datatype(),
|
||||
ConcreteDataType::timestamp_nanosecond_datatype(),
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly two, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
match columns[0].data_type() {
|
||||
ConcreteDataType::String(_) => {
|
||||
let column1 = cast(
|
||||
&columns[0].to_arrow_array(),
|
||||
&ArrowDataType::Timestamp(TimeUnit::Millisecond, None),
|
||||
)
|
||||
.context(ArrowComputeSnafu)?;
|
||||
let column1 = column1.as_primitive::<TimestampMillisecondType>();
|
||||
let column2 = cast(
|
||||
&columns[1].to_arrow_array(),
|
||||
&ArrowDataType::Timestamp(TimeUnit::Millisecond, None),
|
||||
)
|
||||
.context(ArrowComputeSnafu)?;
|
||||
let column2 = column2.as_primitive::<TimestampMillisecondType>();
|
||||
let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
|
||||
let result =
|
||||
zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
|
||||
Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
|
||||
}
|
||||
ConcreteDataType::Date(_) => gt_time_types!(Date32Type, columns),
|
||||
ConcreteDataType::Timestamp(ts_type) => match ts_type {
|
||||
TimestampType::Second(_) => gt_time_types!(TimestampSecondType, columns),
|
||||
TimestampType::Millisecond(_) => {
|
||||
gt_time_types!(TimestampMillisecondType, columns)
|
||||
}
|
||||
TimestampType::Microsecond(_) => {
|
||||
gt_time_types!(TimestampMicrosecondType, columns)
|
||||
}
|
||||
TimestampType::Nanosecond(_) => {
|
||||
gt_time_types!(TimestampNanosecondType, columns)
|
||||
}
|
||||
},
|
||||
_ => UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for GreatestFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "GREATEST")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use common_time::{Date, Timestamp};
|
||||
use datatypes::types::{
|
||||
DateType, TimestampMicrosecondType, TimestampMillisecondType, TimestampNanosecondType,
|
||||
TimestampSecondType,
|
||||
};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{
|
||||
DateVector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
|
||||
TimestampNanosecondVector, TimestampSecondVector, Vector,
|
||||
};
|
||||
use paste::paste;
|
||||
|
||||
use super::*;
|
||||
#[test]
|
||||
fn test_greatest_takes_string_vector() {
|
||||
let function = GreatestFunction;
|
||||
assert_eq!(
|
||||
function
|
||||
.return_type(&[
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::string_datatype()
|
||||
])
|
||||
.unwrap(),
|
||||
ConcreteDataType::timestamp_millisecond_datatype()
|
||||
);
|
||||
let columns = vec![
|
||||
Arc::new(StringVector::from(vec![
|
||||
"1970-01-01".to_string(),
|
||||
"2012-12-23".to_string(),
|
||||
])) as _,
|
||||
Arc::new(StringVector::from(vec![
|
||||
"2001-02-01".to_string(),
|
||||
"1999-01-01".to_string(),
|
||||
])) as _,
|
||||
];
|
||||
|
||||
let result = function
|
||||
.eval(&FunctionContext::default(), &columns)
|
||||
.unwrap();
|
||||
let result = result
|
||||
.as_any()
|
||||
.downcast_ref::<TimestampMillisecondVector>()
|
||||
.unwrap();
|
||||
assert_eq!(result.len(), 2);
|
||||
assert_eq!(
|
||||
result.get(0),
|
||||
Value::Timestamp(Timestamp::from_str("2001-02-01 00:00:00", None).unwrap())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get(1),
|
||||
Value::Timestamp(Timestamp::from_str("2012-12-23 00:00:00", None).unwrap())
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_greatest_takes_date_vector() {
|
||||
let function = GreatestFunction;
|
||||
assert_eq!(
|
||||
function
|
||||
.return_type(&[
|
||||
ConcreteDataType::date_datatype(),
|
||||
ConcreteDataType::date_datatype()
|
||||
])
|
||||
.unwrap(),
|
||||
ConcreteDataType::Date(DateType)
|
||||
);
|
||||
|
||||
let columns = vec![
|
||||
Arc::new(DateVector::from_slice(vec![-1, 2])) as _,
|
||||
Arc::new(DateVector::from_slice(vec![0, 1])) as _,
|
||||
];
|
||||
|
||||
let result = function
|
||||
.eval(&FunctionContext::default(), &columns)
|
||||
.unwrap();
|
||||
let result = result.as_any().downcast_ref::<DateVector>().unwrap();
|
||||
assert_eq!(result.len(), 2);
|
||||
assert_eq!(
|
||||
result.get(0),
|
||||
Value::Date(Date::from_str_utc("1970-01-01").unwrap())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get(1),
|
||||
Value::Date(Date::from_str_utc("1970-01-03").unwrap())
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_greatest_takes_datetime_vector() {
|
||||
let function = GreatestFunction;
|
||||
assert_eq!(
|
||||
function
|
||||
.return_type(&[
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
ConcreteDataType::timestamp_millisecond_datatype()
|
||||
])
|
||||
.unwrap(),
|
||||
ConcreteDataType::timestamp_millisecond_datatype()
|
||||
);
|
||||
|
||||
let columns = vec![
|
||||
Arc::new(TimestampMillisecondVector::from_slice(vec![-1, 2])) as _,
|
||||
Arc::new(TimestampMillisecondVector::from_slice(vec![0, 1])) as _,
|
||||
];
|
||||
|
||||
let result = function
|
||||
.eval(&FunctionContext::default(), &columns)
|
||||
.unwrap();
|
||||
let result = result
|
||||
.as_any()
|
||||
.downcast_ref::<TimestampMillisecondVector>()
|
||||
.unwrap();
|
||||
assert_eq!(result.len(), 2);
|
||||
assert_eq!(
|
||||
result.get(0),
|
||||
Value::Timestamp(Timestamp::from_str("1970-01-01 00:00:00", None).unwrap())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get(1),
|
||||
Value::Timestamp(Timestamp::from_str("1970-01-01 00:00:00.002", None).unwrap())
|
||||
);
|
||||
}
|
||||
|
||||
macro_rules! test_timestamp {
|
||||
($type: expr,$unit: ident) => {
|
||||
paste! {
|
||||
#[test]
|
||||
fn [<test_greatest_takes_ $unit:lower _vector>]() {
|
||||
let function = GreatestFunction;
|
||||
assert_eq!(
|
||||
function.return_type(&[$type, $type]).unwrap(),
|
||||
ConcreteDataType::Timestamp(TimestampType::$unit([<Timestamp $unit Type>]))
|
||||
);
|
||||
|
||||
let columns = vec![
|
||||
Arc::new([<Timestamp $unit Vector>]::from_slice(vec![-1, 2])) as _,
|
||||
Arc::new([<Timestamp $unit Vector>]::from_slice(vec![0, 1])) as _,
|
||||
];
|
||||
|
||||
let result = function.eval(&FunctionContext::default(), &columns).unwrap();
|
||||
let result = result.as_any().downcast_ref::<[<Timestamp $unit Vector>]>().unwrap();
|
||||
assert_eq!(result.len(), 2);
|
||||
assert_eq!(
|
||||
result.get(0),
|
||||
Value::Timestamp(Timestamp::new(0, TimeUnit::$unit))
|
||||
);
|
||||
assert_eq!(
|
||||
result.get(1),
|
||||
Value::Timestamp(Timestamp::new(2, TimeUnit::$unit))
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
test_timestamp!(
|
||||
ConcreteDataType::timestamp_nanosecond_datatype(),
|
||||
Nanosecond
|
||||
);
|
||||
test_timestamp!(
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
Microsecond
|
||||
);
|
||||
test_timestamp!(
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
Millisecond
|
||||
);
|
||||
test_timestamp!(ConcreteDataType::timestamp_second_datatype(), Second);
|
||||
}
|
||||
@@ -217,9 +217,7 @@ pub enum Instruction {
    /// Invalidates batch cache.
    InvalidateCaches(Vec<CacheIdent>),
    /// Flushes regions.
    FlushRegions(FlushRegions),
    /// Flushes a single region.
    FlushRegion(RegionId),
    FlushRegion(FlushRegions),
}

/// The reply of [UpgradeRegion].
@@ -250,7 +248,6 @@ pub enum InstructionReply {
    CloseRegion(SimpleReply),
    UpgradeRegion(UpgradeRegionReply),
    DowngradeRegion(DowngradeRegionReply),
    FlushRegion(SimpleReply),
}

impl Display for InstructionReply {
@@ -262,7 +259,6 @@ impl Display for InstructionReply {
            Self::DowngradeRegion(reply) => {
                write!(f, "InstructionReply::DowngradeRegion({})", reply)
            }
            Self::FlushRegion(reply) => write!(f, "InstructionReply::FlushRegion({})", reply),
        }
    }
}
@@ -112,7 +112,7 @@ pub mod test_utils;
mod tombstone;
pub mod topic_name;
pub mod topic_region;
pub mod txn_helper;
pub(crate) mod txn_helper;
pub mod view_info;

use std::collections::{BTreeMap, HashMap, HashSet};
@@ -25,7 +25,7 @@ pub struct TxnOpGetResponseSet(Vec<KeyValue>);

impl TxnOpGetResponseSet {
    /// Returns a filter to consume a [KeyValue] where the key equals `key`.
    pub fn filter(key: Vec<u8>) -> impl FnMut(&mut TxnOpGetResponseSet) -> Option<Vec<u8>> {
    pub(crate) fn filter(key: Vec<u8>) -> impl FnMut(&mut TxnOpGetResponseSet) -> Option<Vec<u8>> {
        move |set| {
            let pos = set.0.iter().position(|kv| kv.key == key);
            match pos {
@@ -36,7 +36,7 @@ impl TxnOpGetResponseSet {
    }

    /// Returns a decoder to decode bytes to `DeserializedValueWithBytes<T>`.
    pub fn decode_with<F, T>(
    pub(crate) fn decode_with<F, T>(
        mut f: F,
    ) -> impl FnMut(&mut TxnOpGetResponseSet) -> Result<Option<DeserializedValueWithBytes<T>>>
    where
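As an aside, the `filter` helper above illustrates a small combinator pattern: a constructor returning a reusable `impl FnMut` closure that consumes a matching entry from a response set. A self-contained sketch of the same idea, using simplified stand-in types rather than the `common_meta` ones:

// Stand-ins for KeyValue and TxnOpGetResponseSet, for illustration only.
struct KeyValue {
    key: Vec<u8>,
    value: Vec<u8>,
}
struct ResponseSet(Vec<KeyValue>);

// Build a closure that removes and returns the value stored under `key`.
fn filter(key: Vec<u8>) -> impl FnMut(&mut ResponseSet) -> Option<Vec<u8>> {
    move |set| {
        let pos = set.0.iter().position(|kv| kv.key == key)?;
        Some(set.0.swap_remove(pos).value)
    }
}

fn main() {
    let mut set = ResponseSet(vec![KeyValue { key: b"a".to_vec(), value: b"1".to_vec() }]);
    let mut take_a = filter(b"a".to_vec());
    assert_eq!(take_a(&mut set), Some(b"1".to_vec()));
    assert_eq!(take_a(&mut set), None); // the entry was consumed by the first call
}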
@@ -35,7 +35,7 @@ pub mod memory;
pub mod rds;
pub mod test;
pub mod txn;
pub mod util;

pub type KvBackendRef<E = Error> = Arc<dyn KvBackend<Error = E> + Send + Sync>;

#[async_trait]
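The `KvBackendRef<E = Error>` alias above combines a shared trait-object pointer with a defaulted error type parameter. A minimal, self-contained sketch of that aliasing pattern, with illustrative stand-in names rather than the real `KvBackend` trait:

use std::sync::Arc;

// Stand-in trait and types; not the `common_meta::kv_backend` definitions.
trait Backend {
    type Error;
}

#[derive(Debug)]
struct MyError;

struct Memory;

impl Backend for Memory {
    type Error = MyError;
}

// Same shape as `pub type KvBackendRef<E = Error> = Arc<dyn KvBackend<Error = E> + Send + Sync>;`
type BackendRef<E = MyError> = Arc<dyn Backend<Error = E> + Send + Sync>;

fn default_backend() -> BackendRef {
    Arc::new(Memory) // the defaulted error type applies when `E` is omitted
}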
@@ -1,85 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/// Removes sensitive information like passwords from connection strings.
|
||||
///
|
||||
/// This function sanitizes connection strings by removing credentials:
|
||||
/// - For URL format (mysql://user:password@host:port/db): Removes everything before '@'
|
||||
/// - For parameter format (host=localhost password=secret): Removes the password parameter
|
||||
/// - For URL format without credentials (mysql://host:port/db): Removes the protocol prefix
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `conn_str` - The connection string to sanitize
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// A sanitized version of the connection string with sensitive information removed
|
||||
pub fn sanitize_connection_string(conn_str: &str) -> String {
|
||||
// Case 1: URL format with credentials (mysql://user:password@host:port/db)
|
||||
// Extract everything after the '@' symbol
|
||||
if let Some(at_pos) = conn_str.find('@') {
|
||||
return conn_str[at_pos + 1..].to_string();
|
||||
}
|
||||
|
||||
// Case 2: Parameter format with password (host=localhost password=secret dbname=mydb)
|
||||
// Filter out any parameter that starts with "password="
|
||||
if conn_str.contains("password=") {
|
||||
return conn_str
|
||||
.split_whitespace()
|
||||
.filter(|param| !param.starts_with("password="))
|
||||
.collect::<Vec<_>>()
|
||||
.join(" ");
|
||||
}
|
||||
|
||||
// Case 3: URL format without credentials (mysql://host:port/db)
|
||||
// Extract everything after the protocol prefix
|
||||
if let Some(host_part) = conn_str.split("://").nth(1) {
|
||||
return host_part.to_string();
|
||||
}
|
||||
|
||||
// Case 4: Already sanitized or unknown format
|
||||
// Return as is
|
||||
conn_str.to_string()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_sanitize_connection_string() {
|
||||
// Test URL format with username/password
|
||||
let conn_str = "mysql://user:password123@localhost:3306/db";
|
||||
assert_eq!(sanitize_connection_string(conn_str), "localhost:3306/db");
|
||||
|
||||
// Test URL format without credentials
|
||||
let conn_str = "mysql://localhost:3306/db";
|
||||
assert_eq!(sanitize_connection_string(conn_str), "localhost:3306/db");
|
||||
|
||||
// Test parameter format with password
|
||||
let conn_str = "host=localhost port=5432 user=postgres password=secret dbname=mydb";
|
||||
assert_eq!(
|
||||
sanitize_connection_string(conn_str),
|
||||
"host=localhost port=5432 user=postgres dbname=mydb"
|
||||
);
|
||||
|
||||
// Test parameter format without password
|
||||
let conn_str = "host=localhost port=5432 user=postgres dbname=mydb";
|
||||
assert_eq!(
|
||||
sanitize_connection_string(conn_str),
|
||||
"host=localhost port=5432 user=postgres dbname=mydb"
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -39,7 +39,6 @@ pub struct RegionHeartbeatResponseHandler {
|
||||
region_server: RegionServer,
|
||||
catchup_tasks: TaskTracker<()>,
|
||||
downgrade_tasks: TaskTracker<()>,
|
||||
flush_tasks: TaskTracker<()>,
|
||||
}
|
||||
|
||||
/// Handler of the instruction.
|
||||
@@ -51,7 +50,6 @@ pub struct HandlerContext {
|
||||
region_server: RegionServer,
|
||||
catchup_tasks: TaskTracker<()>,
|
||||
downgrade_tasks: TaskTracker<()>,
|
||||
flush_tasks: TaskTracker<()>,
|
||||
}
|
||||
|
||||
impl HandlerContext {
|
||||
@@ -65,7 +63,6 @@ impl HandlerContext {
|
||||
region_server,
|
||||
catchup_tasks: TaskTracker::new(),
|
||||
downgrade_tasks: TaskTracker::new(),
|
||||
flush_tasks: TaskTracker::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -77,7 +74,6 @@ impl RegionHeartbeatResponseHandler {
|
||||
region_server,
|
||||
catchup_tasks: TaskTracker::new(),
|
||||
downgrade_tasks: TaskTracker::new(),
|
||||
flush_tasks: TaskTracker::new(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -99,11 +95,8 @@ impl RegionHeartbeatResponseHandler {
|
||||
handler_context.handle_upgrade_region_instruction(upgrade_region)
|
||||
})),
|
||||
Instruction::InvalidateCaches(_) => InvalidHeartbeatResponseSnafu.fail(),
|
||||
Instruction::FlushRegions(flush_regions) => Ok(Box::new(move |handler_context| {
|
||||
handler_context.handle_flush_regions_instruction(flush_regions)
|
||||
})),
|
||||
Instruction::FlushRegion(flush_region) => Ok(Box::new(move |handler_context| {
|
||||
handler_context.handle_flush_region_instruction(flush_region)
|
||||
Instruction::FlushRegion(flush_regions) => Ok(Box::new(move |handler_context| {
|
||||
handler_context.handle_flush_region_instruction(flush_regions)
|
||||
})),
|
||||
}
|
||||
}
|
||||
@@ -118,7 +111,6 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
|
||||
| Some((_, Instruction::CloseRegion { .. }))
|
||||
| Some((_, Instruction::DowngradeRegion { .. }))
|
||||
| Some((_, Instruction::UpgradeRegion { .. }))
|
||||
| Some((_, Instruction::FlushRegion { .. }))
|
||||
)
|
||||
}
|
||||
|
||||
@@ -132,14 +124,12 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
|
||||
let region_server = self.region_server.clone();
|
||||
let catchup_tasks = self.catchup_tasks.clone();
|
||||
let downgrade_tasks = self.downgrade_tasks.clone();
|
||||
let flush_tasks = self.flush_tasks.clone();
|
||||
let handler = Self::build_handler(instruction)?;
|
||||
let _handle = common_runtime::spawn_global(async move {
|
||||
let reply = handler(HandlerContext {
|
||||
region_server,
|
||||
catchup_tasks,
|
||||
downgrade_tasks,
|
||||
flush_tasks,
|
||||
})
|
||||
.await;
|
||||
|
||||
|
||||
@@ -12,17 +12,16 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_meta::instruction::{FlushRegions, InstructionReply, SimpleReply};
|
||||
use common_meta::instruction::{FlushRegions, InstructionReply};
|
||||
use common_telemetry::warn;
|
||||
use futures_util::future::BoxFuture;
|
||||
use store_api::region_request::{RegionFlushRequest, RegionRequest};
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
use crate::error;
|
||||
use crate::heartbeat::handler::HandlerContext;
|
||||
|
||||
impl HandlerContext {
|
||||
pub(crate) fn handle_flush_regions_instruction(
|
||||
pub(crate) fn handle_flush_region_instruction(
|
||||
self,
|
||||
flush_regions: FlushRegions,
|
||||
) -> BoxFuture<'static, Option<InstructionReply>> {
|
||||
@@ -50,59 +49,6 @@ impl HandlerContext {
|
||||
None
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn handle_flush_region_instruction(
|
||||
self,
|
||||
region_id: RegionId,
|
||||
) -> BoxFuture<'static, Option<InstructionReply>> {
|
||||
Box::pin(async move {
|
||||
let Some(writable) = self.region_server.is_region_leader(region_id) else {
|
||||
return Some(InstructionReply::FlushRegion(SimpleReply {
|
||||
result: false,
|
||||
error: Some("Region is not leader".to_string()),
|
||||
}));
|
||||
};
|
||||
|
||||
if !writable {
|
||||
return Some(InstructionReply::FlushRegion(SimpleReply {
|
||||
result: false,
|
||||
error: Some("Region is not writable".to_string()),
|
||||
}));
|
||||
}
|
||||
|
||||
let region_server_moved = self.region_server.clone();
|
||||
let register_result = self
|
||||
.flush_tasks
|
||||
.try_register(
|
||||
region_id,
|
||||
Box::pin(async move {
|
||||
let request = RegionRequest::Flush(RegionFlushRequest {
|
||||
row_group_size: None,
|
||||
});
|
||||
region_server_moved
|
||||
.handle_request(region_id, request)
|
||||
.await?;
|
||||
Ok(())
|
||||
}),
|
||||
)
|
||||
.await;
|
||||
if register_result.is_busy() {
|
||||
warn!("Another flush task is running for the region: {region_id}");
|
||||
}
|
||||
let mut watcher = register_result.into_watcher();
|
||||
let result = self.flush_tasks.wait_until_finish(&mut watcher).await;
|
||||
match result {
|
||||
Ok(()) => Some(InstructionReply::FlushRegion(SimpleReply {
|
||||
result: true,
|
||||
error: None,
|
||||
})),
|
||||
Err(err) => Some(InstructionReply::FlushRegion(SimpleReply {
|
||||
result: false,
|
||||
error: Some(format!("{err:?}")),
|
||||
})),
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -138,7 +84,7 @@ mod tests {
|
||||
|
||||
let reply = handler_context
|
||||
.clone()
|
||||
.handle_flush_regions_instruction(FlushRegions {
|
||||
.handle_flush_region_instruction(FlushRegions {
|
||||
region_ids: region_ids.clone(),
|
||||
})
|
||||
.await;
|
||||
@@ -148,7 +94,7 @@ mod tests {
|
||||
flushed_region_ids.write().unwrap().clear();
|
||||
let not_found_region_ids = (0..2).map(|i| RegionId::new(2048, i)).collect::<Vec<_>>();
|
||||
let reply = handler_context
|
||||
.handle_flush_regions_instruction(FlushRegions {
|
||||
.handle_flush_region_instruction(FlushRegions {
|
||||
region_ids: not_found_region_ids.clone(),
|
||||
})
|
||||
.await;
|
||||
|
||||
@@ -144,11 +144,6 @@ impl<T: Send + Sync + Clone + 'static> TaskTracker<T> {
        }
    }

    /// Waits for a [RegisterResult] and returns a [WaitResult].
    pub(crate) async fn wait_until_finish(&self, watcher: &mut TaskWatcher<T>) -> Result<T> {
        wait(watcher).await
    }

    /// Tries to register a new async task, returns [RegisterResult::Busy] if previous task is running.
    pub(crate) async fn try_register(
        &self,
@@ -16,8 +16,8 @@ use std::any::Any;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow::array::Array;
|
||||
use arrow::datatypes::Int32Type;
|
||||
use arrow_array::{ArrayRef, DictionaryArray, Int32Array};
|
||||
use arrow::datatypes::Int64Type;
|
||||
use arrow_array::{ArrayRef, DictionaryArray, Int64Array};
|
||||
use serde_json::Value as JsonValue;
|
||||
use snafu::ResultExt;
|
||||
|
||||
@@ -32,7 +32,7 @@ use crate::vectors::{self, Helper, Validity, Vector, VectorRef};
|
||||
/// Vector of dictionaries, basically backed by Arrow's `DictionaryArray`.
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct DictionaryVector {
|
||||
array: DictionaryArray<Int32Type>,
|
||||
array: DictionaryArray<Int64Type>,
|
||||
/// The datatype of the items in the dictionary.
|
||||
item_type: ConcreteDataType,
|
||||
/// The vector of items in the dictionary.
|
||||
@@ -41,7 +41,7 @@ pub struct DictionaryVector {
|
||||
|
||||
impl DictionaryVector {
|
||||
/// Create a new instance of `DictionaryVector` from a dictionary array and item type
|
||||
pub fn new(array: DictionaryArray<Int32Type>, item_type: ConcreteDataType) -> Result<Self> {
|
||||
pub fn new(array: DictionaryArray<Int64Type>, item_type: ConcreteDataType) -> Result<Self> {
|
||||
let item_vector = Helper::try_into_vector(array.values())?;
|
||||
|
||||
Ok(Self {
|
||||
@@ -52,12 +52,12 @@ impl DictionaryVector {
|
||||
}
|
||||
|
||||
/// Returns the underlying Arrow dictionary array
|
||||
pub fn array(&self) -> &DictionaryArray<Int32Type> {
|
||||
pub fn array(&self) -> &DictionaryArray<Int64Type> {
|
||||
&self.array
|
||||
}
|
||||
|
||||
/// Returns the keys array of this dictionary
|
||||
pub fn keys(&self) -> &arrow_array::PrimitiveArray<Int32Type> {
|
||||
pub fn keys(&self) -> &arrow_array::PrimitiveArray<Int64Type> {
|
||||
self.array.keys()
|
||||
}
|
||||
|
||||
@@ -74,7 +74,7 @@ impl DictionaryVector {
|
||||
impl Vector for DictionaryVector {
|
||||
fn data_type(&self) -> ConcreteDataType {
|
||||
ConcreteDataType::Dictionary(DictionaryType::new(
|
||||
ConcreteDataType::int32_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
self.item_type.clone(),
|
||||
))
|
||||
}
|
||||
@@ -163,10 +163,10 @@ impl Serializable for DictionaryVector {
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<DictionaryArray<Int32Type>> for DictionaryVector {
|
||||
impl TryFrom<DictionaryArray<Int64Type>> for DictionaryVector {
|
||||
type Error = crate::error::Error;
|
||||
|
||||
fn try_from(array: DictionaryArray<Int32Type>) -> Result<Self> {
|
||||
fn try_from(array: DictionaryArray<Int64Type>) -> Result<Self> {
|
||||
let item_type = ConcreteDataType::from_arrow_type(array.values().data_type());
|
||||
let item_vector = Helper::try_into_vector(array.values())?;
|
||||
|
||||
@@ -243,7 +243,7 @@ impl VectorOp for DictionaryVector {
|
||||
previous_offset = offset;
|
||||
}
|
||||
|
||||
let new_keys = Int32Array::from(replicated_keys);
|
||||
let new_keys = Int64Array::from(replicated_keys);
|
||||
let new_array = DictionaryArray::try_new(new_keys, self.values().clone())
|
||||
.expect("Failed to create replicated dictionary array");
|
||||
|
||||
@@ -261,7 +261,7 @@ impl VectorOp for DictionaryVector {
|
||||
let filtered_key_array = filtered_key_vector.to_arrow_array();
|
||||
let filtered_key_array = filtered_key_array
|
||||
.as_any()
|
||||
.downcast_ref::<Int32Array>()
|
||||
.downcast_ref::<Int64Array>()
|
||||
.unwrap();
|
||||
|
||||
let new_array = DictionaryArray::try_new(filtered_key_array.clone(), self.values().clone())
|
||||
@@ -291,7 +291,7 @@ impl VectorOp for DictionaryVector {
|
||||
let key_vector = Helper::try_into_vector(&key_array)?;
|
||||
let new_key_vector = key_vector.take(indices)?;
|
||||
let new_key_array = new_key_vector.to_arrow_array();
|
||||
let new_key_array = new_key_array.as_any().downcast_ref::<Int32Array>().unwrap();
|
||||
let new_key_array = new_key_array.as_any().downcast_ref::<Int64Array>().unwrap();
|
||||
|
||||
let new_array = DictionaryArray::try_new(new_key_array.clone(), self.values().clone())
|
||||
.expect("Failed to create filtered dictionary array");
|
||||
@@ -318,7 +318,7 @@ mod tests {
|
||||
// Keys: [0, 1, 2, null, 1, 3]
|
||||
// Resulting in: ["a", "b", "c", null, "b", "d"]
|
||||
let values = StringArray::from(vec!["a", "b", "c", "d"]);
|
||||
let keys = Int32Array::from(vec![Some(0), Some(1), Some(2), None, Some(1), Some(3)]);
|
||||
let keys = Int64Array::from(vec![Some(0), Some(1), Some(2), None, Some(1), Some(3)]);
|
||||
let dict_array = DictionaryArray::new(keys, Arc::new(values));
|
||||
DictionaryVector::try_from(dict_array).unwrap()
|
||||
}
|
||||
@@ -404,7 +404,7 @@ mod tests {
|
||||
assert_eq!(
|
||||
casted.data_type(),
|
||||
ConcreteDataType::Dictionary(DictionaryType::new(
|
||||
ConcreteDataType::int32_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
ConcreteDataType::string_datatype(),
|
||||
))
|
||||
);
|
||||
|
||||
@@ -20,7 +20,7 @@ use std::sync::Arc;
|
||||
use arrow::array::{Array, ArrayRef, StringArray};
|
||||
use arrow::compute;
|
||||
use arrow::compute::kernels::comparison;
|
||||
use arrow::datatypes::{DataType as ArrowDataType, Int32Type, TimeUnit};
|
||||
use arrow::datatypes::{DataType as ArrowDataType, Int64Type, TimeUnit};
|
||||
use arrow_array::DictionaryArray;
|
||||
use arrow_schema::IntervalUnit;
|
||||
use datafusion_common::ScalarValue;
|
||||
@@ -348,11 +348,11 @@ impl Helper {
|
||||
ArrowDataType::Decimal128(_, _) => {
|
||||
Arc::new(Decimal128Vector::try_from_arrow_array(array)?)
|
||||
}
|
||||
ArrowDataType::Dictionary(key, value) if matches!(&**key, ArrowDataType::Int32) => {
|
||||
ArrowDataType::Dictionary(key, value) if matches!(&**key, ArrowDataType::Int64) => {
|
||||
let array = array
|
||||
.as_ref()
|
||||
.as_any()
|
||||
.downcast_ref::<DictionaryArray<Int32Type>>()
|
||||
.downcast_ref::<DictionaryArray<Int64Type>>()
|
||||
.unwrap(); // Safety: the type is guarded by match arm condition
|
||||
Arc::new(DictionaryVector::new(
|
||||
array.clone(),
|
||||
|
||||
@@ -37,12 +37,11 @@ use tokio::sync::{Mutex, RwLock};

use crate::adapter::{CreateFlowArgs, StreamingEngine};
use crate::batching_mode::engine::BatchingEngine;
use crate::batching_mode::{FRONTEND_SCAN_TIMEOUT, MIN_REFRESH_DURATION};
use crate::engine::FlowEngine;
use crate::error::{
    CreateFlowSnafu, ExternalSnafu, FlowNotFoundSnafu, IllegalCheckTaskStateSnafu,
    InsertIntoFlowSnafu, InternalSnafu, JoinTaskSnafu, ListFlowsSnafu, NoAvailableFrontendSnafu,
    SyncCheckTaskSnafu, UnexpectedSnafu,
    InsertIntoFlowSnafu, InternalSnafu, JoinTaskSnafu, ListFlowsSnafu, SyncCheckTaskSnafu,
    UnexpectedSnafu,
};
use crate::metrics::METRIC_FLOW_TASK_COUNT;
use crate::repr::{self, DiffRow};
@@ -82,11 +81,6 @@ impl FlowDualEngine {
        }
    }

    /// Determine if the engine is in distributed mode
    pub fn is_distributed(&self) -> bool {
        self.streaming_engine.node_id.is_some()
    }

    pub fn streaming_engine(&self) -> Arc<StreamingEngine> {
        self.streaming_engine.clone()
    }
@@ -95,39 +89,6 @@ impl FlowDualEngine {
        self.batching_engine.clone()
    }

    /// In distributed mode, scan periodically(1s) until available frontend is found, or timeout,
    /// in standalone mode, return immediately
    /// notice here if any frontend appear in cluster info this function will return immediately
    async fn wait_for_available_frontend(&self, timeout: std::time::Duration) -> Result<(), Error> {
        if !self.is_distributed() {
            return Ok(());
        }
        let frontend_client = self.batching_engine().frontend_client.clone();
        let sleep_duration = std::time::Duration::from_millis(1_000);
        let now = std::time::Instant::now();
        loop {
            let frontend_list = frontend_client.scan_for_frontend().await?;
            if !frontend_list.is_empty() {
                let fe_list = frontend_list
                    .iter()
                    .map(|(_, info)| &info.peer.addr)
                    .collect::<Vec<_>>();
                info!("Available frontend found: {:?}", fe_list);
                return Ok(());
            }
            let elapsed = now.elapsed();
            tokio::time::sleep(sleep_duration).await;
            info!("Waiting for available frontend, elapsed={:?}", elapsed);
            if elapsed >= timeout {
                return NoAvailableFrontendSnafu {
                    timeout,
                    context: "No available frontend found in cluster info",
                }
                .fail();
            }
        }
    }

    /// Try to sync with check task, this is only used in drop flow&flush flow, so a flow id is required
    ///
    /// the need to sync is to make sure flush flow actually get called
@@ -377,36 +338,18 @@ struct ConsistentCheckTask {

impl ConsistentCheckTask {
    async fn start_check_task(engine: &Arc<FlowDualEngine>) -> Result<Self, Error> {
        let engine = engine.clone();
        // first do recover flows
        engine.check_flow_consistent(true, false).await?;

        let inner = engine.clone();
        let (tx, mut rx) = tokio::sync::mpsc::channel(1);
        let (trigger_tx, mut trigger_rx) =
            tokio::sync::mpsc::channel::<(bool, bool, tokio::sync::oneshot::Sender<()>)>(10);
        let handle = common_runtime::spawn_global(async move {
            // first check if available frontend is found
            if let Err(err) = engine
                .wait_for_available_frontend(FRONTEND_SCAN_TIMEOUT)
                .await
            {
                warn!("No frontend is available yet:\n {err:?}");
            }

            // then do recover flows, if failed, always retry
            let mut recover_retry = 0;
            while let Err(err) = engine.check_flow_consistent(true, false).await {
                recover_retry += 1;
                error!(
                    "Failed to recover flows:\n {err:?}, retry {} in {}s",
                    recover_retry,
                    MIN_REFRESH_DURATION.as_secs()
                );
                tokio::time::sleep(MIN_REFRESH_DURATION).await;
            }

            // then do check flows, with configurable allow_create and allow_drop
            let (mut allow_create, mut allow_drop) = (false, false);
            let mut ret_signal: Option<tokio::sync::oneshot::Sender<()>> = None;
            loop {
                if let Err(err) = engine.check_flow_consistent(allow_create, allow_drop).await {
                if let Err(err) = inner.check_flow_consistent(allow_create, allow_drop).await {
                    error!(err; "Failed to check flow consistent");
                }
                if let Some(done) = ret_signal.take() {
@@ -591,12 +534,7 @@ impl FlowEngine for FlowDualEngine {
        match flow_type {
            Some(FlowType::Batching) => self.batching_engine.flush_flow(flow_id).await,
            Some(FlowType::Streaming) => self.streaming_engine.flush_flow(flow_id).await,
            None => {
                warn!(
                    "Currently flow={flow_id} doesn't exist in flownode, ignore flush_flow request"
                );
                Ok(0)
            }
            None => Ok(0),
        }
    }

@@ -31,19 +31,10 @@ pub const DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT: Duration = Duration::from_secs(
pub const SLOW_QUERY_THRESHOLD: Duration = Duration::from_secs(60);

/// The minimum duration between two queries execution by batching mode task
pub const MIN_REFRESH_DURATION: Duration = Duration::new(5, 0);
const MIN_REFRESH_DURATION: Duration = Duration::new(5, 0);

/// Grpc connection timeout
const GRPC_CONN_TIMEOUT: Duration = Duration::from_secs(5);

/// Grpc max retry number
const GRPC_MAX_RETRIES: u32 = 3;

/// Flow wait for available frontend timeout,
/// if failed to find available frontend after FRONTEND_SCAN_TIMEOUT elapsed, return error
/// which should prevent flownode from starting
pub const FRONTEND_SCAN_TIMEOUT: Duration = Duration::from_secs(30);

/// Frontend activity timeout
/// if frontend is down(not sending heartbeat) for more than FRONTEND_ACTIVITY_TIMEOUT, it will be removed from the list that flownode use to connect
pub const FRONTEND_ACTIVITY_TIMEOUT: Duration = Duration::from_secs(60);

@@ -49,8 +49,7 @@ use crate::{CreateFlowArgs, Error, FlowId, TableName};
pub struct BatchingEngine {
    tasks: RwLock<BTreeMap<FlowId, BatchingTask>>,
    shutdown_txs: RwLock<BTreeMap<FlowId, oneshot::Sender<()>>>,
    /// frontend client for insert request
    pub(crate) frontend_client: Arc<FrontendClient>,
    frontend_client: Arc<FrontendClient>,
    flow_metadata_manager: FlowMetadataManagerRef,
    table_meta: TableMetadataManagerRef,
    catalog_manager: CatalogManagerRef,

@@ -15,7 +15,6 @@

//! Frontend client to run flow as batching task which is time-window-aware normal query triggered every tick set by user

use std::sync::{Arc, Weak};
use std::time::SystemTime;

use api::v1::greptime_request::Request;
use api::v1::CreateTableExpr;
@@ -27,17 +26,15 @@ use common_meta::peer::Peer;
use common_meta::rpc::store::RangeRequest;
use common_query::Output;
use common_telemetry::warn;
use itertools::Itertools;
use meta_client::client::MetaClient;
use servers::query_handler::grpc::GrpcQueryHandler;
use session::context::{QueryContextBuilder, QueryContextRef};
use snafu::{OptionExt, ResultExt};

use crate::batching_mode::{
    DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, FRONTEND_ACTIVITY_TIMEOUT, GRPC_CONN_TIMEOUT,
    GRPC_MAX_RETRIES,
    DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, GRPC_CONN_TIMEOUT, GRPC_MAX_RETRIES,
};
use crate::error::{ExternalSnafu, InvalidRequestSnafu, NoAvailableFrontendSnafu, UnexpectedSnafu};
use crate::error::{ExternalSnafu, InvalidRequestSnafu, UnexpectedSnafu};
use crate::Error;

/// Just like [`GrpcQueryHandler`] but use BoxedError
@@ -130,24 +127,10 @@ impl DatabaseWithPeer {
    fn new(database: Database, peer: Peer) -> Self {
        Self { database, peer }
    }

    /// Try sending a "SELECT 1" to the database
    async fn try_select_one(&self) -> Result<(), Error> {
        // notice here use `sql` for `SELECT 1` return 1 row
        let _ = self
            .database
            .sql("SELECT 1")
            .await
            .with_context(|_| InvalidRequestSnafu {
                context: format!("Failed to handle `SELECT 1` request at {:?}", self.peer),
            })?;
        Ok(())
    }
}

impl FrontendClient {
    /// scan for available frontend from metadata
    pub(crate) async fn scan_for_frontend(&self) -> Result<Vec<(NodeInfoKey, NodeInfo)>, Error> {
    async fn scan_for_frontend(&self) -> Result<Vec<(NodeInfoKey, NodeInfo)>, Error> {
        let Self::Distributed { meta_client, .. } = self else {
            return Ok(vec![]);
        };
@@ -177,8 +160,8 @@ impl FrontendClient {
        Ok(res)
    }

    /// Get the database with maximum `last_activity_ts`& is able to process query
    async fn get_latest_active_frontend(
    /// Get the database with max `last_activity_ts`
    async fn get_last_active_frontend(
        &self,
        catalog: &str,
        schema: &str,
@@ -194,50 +177,22 @@ impl FrontendClient {
            .fail();
        };

        let mut interval = tokio::time::interval(GRPC_CONN_TIMEOUT);
        interval.tick().await;
        for retry in 0..GRPC_MAX_RETRIES {
            let frontends = self.scan_for_frontend().await?;
            let now_in_ms = SystemTime::now()
                .duration_since(SystemTime::UNIX_EPOCH)
                .unwrap()
                .as_millis() as i64;
        let frontends = self.scan_for_frontend().await?;
        let mut peer = None;

            // found node with maximum last_activity_ts
            for (_, node_info) in frontends
                .iter()
                .sorted_by_key(|(_, node_info)| node_info.last_activity_ts)
                .rev()
                // filter out frontend that have been down for more than 1 min
                .filter(|(_, node_info)| {
                    node_info.last_activity_ts + FRONTEND_ACTIVITY_TIMEOUT.as_millis() as i64
                        > now_in_ms
                })
            {
                let addr = &node_info.peer.addr;
                let client = Client::with_manager_and_urls(chnl_mgr.clone(), vec![addr.clone()]);
                let database = Database::new(catalog, schema, client);
                let db = DatabaseWithPeer::new(database, node_info.peer.clone());
                match db.try_select_one().await {
                    Ok(_) => return Ok(db),
                    Err(e) => {
                        warn!(
                            "Failed to connect to frontend {} on retry={}: \n{e:?}",
                            addr, retry
                        );
                    }
                }
        if let Some((_, val)) = frontends.iter().max_by_key(|(_, val)| val.last_activity_ts) {
            peer = Some(val.peer.clone());
        }

        let Some(peer) = peer else {
            UnexpectedSnafu {
                reason: format!("No frontend available: {:?}", frontends),
            }
            // no available frontend
            // sleep and retry
            interval.tick().await;
        }

        NoAvailableFrontendSnafu {
            timeout: GRPC_CONN_TIMEOUT,
            context: "No available frontend found that is able to process query",
        }
        .fail()
            .fail()?
        };
        let client = Client::with_manager_and_urls(chnl_mgr.clone(), vec![peer.addr.clone()]);
        let database = Database::new(catalog, schema, client);
        Ok(DatabaseWithPeer::new(database, peer))
    }

    pub async fn create(
@@ -267,18 +222,38 @@ impl FrontendClient {
    ) -> Result<u32, Error> {
        match self {
            FrontendClient::Distributed { .. } => {
                let db = self.get_latest_active_frontend(catalog, schema).await?;
                let db = self.get_last_active_frontend(catalog, schema).await?;

                *peer_desc = Some(PeerDesc::Dist {
                    peer: db.peer.clone(),
                });

                db.database
                    .handle_with_retry(req.clone(), GRPC_MAX_RETRIES)
                    .await
                    .with_context(|_| InvalidRequestSnafu {
                        context: format!("Failed to handle request at {:?}: {:?}", db.peer, req),
                    })
                let mut retry = 0;

                loop {
                    let ret = db.database.handle(req.clone()).await.with_context(|_| {
                        InvalidRequestSnafu {
                            context: format!("Failed to handle request: {:?}", req),
                        }
                    });
                    if let Err(err) = ret {
                        if retry < GRPC_MAX_RETRIES {
                            retry += 1;
                            warn!(
                                "Failed to send request to grpc handle at Peer={:?}, retry = {}, error = {:?}",
                                db.peer, retry, err
                            );
                            continue;
                        } else {
                            common_telemetry::error!(
                                "Failed to send request to grpc handle at Peer={:?} after {} retries, error = {:?}",
                                db.peer, retry, err
                            );
                            return Err(err);
                        }
                    }
                    return ret;
                }
            }
            FrontendClient::Standalone { database_client } => {
                let ctx = QueryContextBuilder::default()

@@ -61,16 +61,6 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display(
        "No available frontend found after timeout: {timeout:?}, context: {context}"
    ))]
    NoAvailableFrontend {
        timeout: std::time::Duration,
        context: String,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("External error"))]
    External {
        source: BoxedError,
@@ -306,8 +296,7 @@ impl ErrorExt for Error {
            Self::Eval { .. }
            | Self::JoinTask { .. }
            | Self::Datafusion { .. }
            | Self::InsertIntoFlow { .. }
            | Self::NoAvailableFrontend { .. } => StatusCode::Internal,
            | Self::InsertIntoFlow { .. } => StatusCode::Internal,
            Self::FlowAlreadyExist { .. } => StatusCode::TableAlreadyExists,
            Self::TableNotFound { .. }
            | Self::TableNotFoundMeta { .. }

@@ -172,8 +172,6 @@ impl FlownodeServer {
    }

    /// Start the background task for streaming computation.
    ///
    /// Should be called only after heartbeat is establish, hence can get cluster info
    async fn start_workers(&self) -> Result<(), Error> {
        let manager_ref = self.inner.flow_service.dual_engine.clone();
        let handle = manager_ref

@@ -14,7 +14,7 @@

pub mod builder;

use std::fmt::{self, Display};
use std::fmt::Display;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex, RwLock};
use std::time::Duration;
@@ -48,7 +48,6 @@ use serde::{Deserialize, Serialize};
use servers::export_metrics::ExportMetricsOption;
use servers::http::HttpOptions;
use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionId;
use table::metadata::TableId;
use tokio::sync::broadcast::error::RecvError;

@@ -66,7 +65,7 @@ use crate::procedure::wal_prune::manager::WalPruneTickerRef;
use crate::procedure::ProcedureManagerListenerAdapter;
use crate::pubsub::{PublisherRef, SubscriptionManagerRef};
use crate::region::supervisor::RegionSupervisorTickerRef;
use crate::selector::{RegionStatAwareSelector, Selector, SelectorType};
use crate::selector::{Selector, SelectorType};
use crate::service::mailbox::MailboxRef;
use crate::service::store::cached_kv::LeaderCachedKvBackend;
use crate::state::{become_follower, become_leader, StateRef};
@@ -97,7 +96,7 @@ pub enum BackendImpl {
    MysqlStore,
}

#[derive(Clone, PartialEq, Serialize, Deserialize)]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(default)]
pub struct MetasrvOptions {
    /// The address the server listens on.
@@ -167,47 +166,6 @@ pub struct MetasrvOptions {
    pub node_max_idle_time: Duration,
}

impl fmt::Debug for MetasrvOptions {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut debug_struct = f.debug_struct("MetasrvOptions");
        debug_struct
            .field("bind_addr", &self.bind_addr)
            .field("server_addr", &self.server_addr)
            .field("store_addrs", &self.sanitize_store_addrs())
            .field("selector", &self.selector)
            .field("use_memory_store", &self.use_memory_store)
            .field("enable_region_failover", &self.enable_region_failover)
            .field(
                "allow_region_failover_on_local_wal",
                &self.allow_region_failover_on_local_wal,
            )
            .field("http", &self.http)
            .field("logging", &self.logging)
            .field("procedure", &self.procedure)
            .field("failure_detector", &self.failure_detector)
            .field("datanode", &self.datanode)
            .field("enable_telemetry", &self.enable_telemetry)
            .field("data_home", &self.data_home)
            .field("wal", &self.wal)
            .field("export_metrics", &self.export_metrics)
            .field("store_key_prefix", &self.store_key_prefix)
            .field("max_txn_ops", &self.max_txn_ops)
            .field("flush_stats_factor", &self.flush_stats_factor)
            .field("tracing", &self.tracing)
            .field("backend", &self.backend);

        #[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
        debug_struct.field("meta_table_name", &self.meta_table_name);

        #[cfg(feature = "pg_kvbackend")]
        debug_struct.field("meta_election_lock_id", &self.meta_election_lock_id);

        debug_struct
            .field("node_max_idle_time", &self.node_max_idle_time)
            .finish()
    }
}

const DEFAULT_METASRV_ADDR_PORT: &str = "3002";

impl Default for MetasrvOptions {
@@ -291,13 +249,6 @@ impl MetasrvOptions {
            common_telemetry::debug!("detect local IP is not supported on Android");
        }
    }

    fn sanitize_store_addrs(&self) -> Vec<String> {
        self.store_addrs
            .iter()
            .map(|addr| common_meta::kv_backend::util::sanitize_connection_string(addr))
            .collect()
    }
}

pub struct MetasrvInfo {
@@ -387,8 +338,6 @@ pub struct SelectorContext {
}

pub type SelectorRef = Arc<dyn Selector<Context = SelectorContext, Output = Vec<Peer>>>;
pub type RegionStatAwareSelectorRef =
    Arc<dyn RegionStatAwareSelector<Context = SelectorContext, Output = Vec<(RegionId, Peer)>>>;
pub type ElectionRef = Arc<dyn Election<Leader = LeaderValue>>;

pub struct MetaStateHandler {

@@ -40,7 +40,7 @@ use common_meta::state_store::KvStateStore;
use common_meta::wal_options_allocator::{build_kafka_client, build_wal_options_allocator};
use common_procedure::local::{LocalManager, ManagerConfig};
use common_procedure::ProcedureManagerRef;
use common_telemetry::{info, warn};
use common_telemetry::warn;
use snafu::{ensure, ResultExt};

use crate::cache_invalidator::MetasrvCacheInvalidator;
@@ -54,16 +54,16 @@ use crate::handler::region_lease_handler::{CustomizedRegionLeaseRenewerRef, Regi
use crate::handler::{HeartbeatHandlerGroupBuilder, HeartbeatMailbox, Pushers};
use crate::lease::MetaPeerLookupService;
use crate::metasrv::{
    ElectionRef, Metasrv, MetasrvInfo, MetasrvOptions, RegionStatAwareSelectorRef, SelectTarget,
    SelectorContext, SelectorRef, FLOW_ID_SEQ, TABLE_ID_SEQ,
    ElectionRef, Metasrv, MetasrvInfo, MetasrvOptions, SelectTarget, SelectorContext, SelectorRef,
    FLOW_ID_SEQ, TABLE_ID_SEQ,
};
use crate::procedure::region_migration::manager::RegionMigrationManager;
use crate::procedure::region_migration::DefaultContextFactory;
use crate::procedure::wal_prune::manager::{WalPruneManager, WalPruneTicker};
use crate::procedure::wal_prune::Context as WalPruneContext;
use crate::region::supervisor::{
    HeartbeatAcceptor, RegionFailureDetectorControl, RegionSupervisor, RegionSupervisorSelector,
    RegionSupervisorTicker, DEFAULT_TICK_INTERVAL,
    HeartbeatAcceptor, RegionFailureDetectorControl, RegionSupervisor, RegionSupervisorTicker,
    DEFAULT_TICK_INTERVAL,
};
use crate::selector::lease_based::LeaseBasedSelector;
use crate::selector::round_robin::RoundRobinSelector;
@@ -320,24 +320,13 @@ impl MetasrvBuilder {
            ),
        ));
        region_migration_manager.try_start()?;
        let region_supervisor_selector = plugins
            .as_ref()
            .and_then(|plugins| plugins.get::<RegionStatAwareSelectorRef>());

        let supervisor_selector = match region_supervisor_selector {
            Some(selector) => {
                info!("Using region stat aware selector");
                RegionSupervisorSelector::RegionStatAwareSelector(selector)
            }
            None => RegionSupervisorSelector::NaiveSelector(selector.clone()),
        };

        let region_failover_handler = if options.enable_region_failover {
            let region_supervisor = RegionSupervisor::new(
                rx,
                options.failure_detector,
                selector_ctx.clone(),
                supervisor_selector,
                selector.clone(),
                region_migration_manager.clone(),
                maintenance_mode_manager.clone(),
                peer_lookup_service.clone(),

@@ -14,7 +14,6 @@

pub(crate) mod close_downgraded_region;
pub(crate) mod downgrade_leader_region;
pub(crate) mod flush_leader_region;
pub(crate) mod manager;
pub(crate) mod migration_abort;
pub(crate) mod migration_end;
@@ -112,8 +111,6 @@ impl PersistentContext {
pub struct Metrics {
    /// Elapsed time of downgrading region and upgrading region.
    operations_elapsed: Duration,
    /// Elapsed time of flushing leader region.
    flush_leader_region_elapsed: Duration,
    /// Elapsed time of downgrading leader region.
    downgrade_leader_region_elapsed: Duration,
    /// Elapsed time of open candidate region.
@@ -124,15 +121,10 @@ pub struct Metrics {

impl Display for Metrics {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let total = self.flush_leader_region_elapsed
            + self.downgrade_leader_region_elapsed
            + self.open_candidate_region_elapsed
            + self.upgrade_candidate_region_elapsed;
        write!(
            f,
            "total: {:?}, flush_leader_region_elapsed: {:?}, downgrade_leader_region_elapsed: {:?}, open_candidate_region_elapsed: {:?}, upgrade_candidate_region_elapsed: {:?}",
            total,
            self.flush_leader_region_elapsed,
            "operations_elapsed: {:?}, downgrade_leader_region_elapsed: {:?}, open_candidate_region_elapsed: {:?}, upgrade_candidate_region_elapsed: {:?}",
            self.operations_elapsed,
            self.downgrade_leader_region_elapsed,
            self.open_candidate_region_elapsed,
            self.upgrade_candidate_region_elapsed
@@ -146,11 +138,6 @@ impl Metrics {
        self.operations_elapsed += elapsed;
    }

    /// Updates the elapsed time of flushing leader region.
    pub fn update_flush_leader_region_elapsed(&mut self, elapsed: Duration) {
        self.flush_leader_region_elapsed += elapsed;
    }

    /// Updates the elapsed time of downgrading leader region.
    pub fn update_downgrade_leader_region_elapsed(&mut self, elapsed: Duration) {
        self.downgrade_leader_region_elapsed += elapsed;
@@ -169,18 +156,10 @@ impl Metrics {

impl Drop for Metrics {
    fn drop(&mut self) {
        let total = self.flush_leader_region_elapsed
            + self.downgrade_leader_region_elapsed
            + self.open_candidate_region_elapsed
            + self.upgrade_candidate_region_elapsed;
        METRIC_META_REGION_MIGRATION_STAGE_ELAPSED
            .with_label_values(&["total"])
            .observe(total.as_secs_f64());

        if !self.flush_leader_region_elapsed.is_zero() {
        if !self.operations_elapsed.is_zero() {
            METRIC_META_REGION_MIGRATION_STAGE_ELAPSED
                .with_label_values(&["flush_leader_region"])
                .observe(self.flush_leader_region_elapsed.as_secs_f64());
                .with_label_values(&["operations"])
                .observe(self.operations_elapsed.as_secs_f64());
        }

        if !self.downgrade_leader_region_elapsed.is_zero() {
@@ -341,13 +320,6 @@ impl Context {
            .update_operations_elapsed(instant.elapsed());
    }

    /// Updates the elapsed time of flushing leader region.
    pub fn update_flush_leader_region_elapsed(&mut self, instant: Instant) {
        self.volatile_ctx
            .metrics
            .update_flush_leader_region_elapsed(instant.elapsed());
    }

    /// Updates the elapsed time of downgrading leader region.
    pub fn update_downgrade_leader_region_elapsed(&mut self, instant: Instant) {
        self.volatile_ctx
@@ -728,8 +700,7 @@ mod tests {
    use crate::procedure::region_migration::open_candidate_region::OpenCandidateRegion;
    use crate::procedure::region_migration::test_util::*;
    use crate::procedure::test_util::{
        new_downgrade_region_reply, new_flush_region_reply, new_open_region_reply,
        new_upgrade_region_reply,
        new_downgrade_region_reply, new_open_region_reply, new_upgrade_region_reply,
    };
    use crate::service::mailbox::Channel;

@@ -1237,15 +1208,6 @@ mod tests {
                to_peer_id,
                Arc::new(|id| Ok(new_open_region_reply(id, true, None))),
            )),
            Assertion::simple(assert_flush_leader_region, assert_no_persist),
        ),
        // Flush Leader Region
        Step::next(
            "Should be the flush leader region",
            Some(mock_datanode_reply(
                from_peer_id,
                Arc::new(|id| Ok(new_flush_region_reply(id, true, None))),
            )),
            Assertion::simple(assert_update_metadata_downgrade, assert_no_persist),
        ),
        // UpdateMetadata::Downgrade

@@ -170,7 +170,7 @@ impl DowngradeLeaderRegion {
        if error.is_some() {
            return error::RetryLaterSnafu {
                reason: format!(
                    "Failed to downgrade the region {} on datanode {:?}, error: {:?}, elapsed: {:?}",
                    "Failed to downgrade the region {} on Datanode {:?}, error: {:?}, elapsed: {:?}",
                    region_id, leader, error, now.elapsed()
                ),
            }
@@ -179,14 +179,13 @@ impl DowngradeLeaderRegion {

        if !exists {
            warn!(
                "Trying to downgrade the region {} on datanode {:?}, but region doesn't exist!, elapsed: {:?}",
                "Trying to downgrade the region {} on Datanode {}, but region doesn't exist!, elapsed: {:?}",
                region_id, leader, now.elapsed()
            );
        } else {
            info!(
                "Region {} leader is downgraded on datanode {:?}, last_entry_id: {:?}, metadata_last_entry_id: {:?}, elapsed: {:?}",
                "Region {} leader is downgraded, last_entry_id: {:?}, metadata_last_entry_id: {:?}, elapsed: {:?}",
                region_id,
                leader,
                last_entry_id,
                metadata_last_entry_id,
                now.elapsed()

@@ -1,285 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;

use api::v1::meta::MailboxMessage;
use common_meta::instruction::{Instruction, InstructionReply, SimpleReply};
use common_procedure::Status;
use common_telemetry::{info, warn};
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use tokio::time::Instant;

use crate::error::{self, Error, Result};
use crate::handler::HeartbeatMailbox;
use crate::procedure::region_migration::update_metadata::UpdateMetadata;
use crate::procedure::region_migration::{Context, State};
use crate::service::mailbox::Channel;

/// Flushes the leader region before downgrading it.
///
/// This can minimize the time window where the region is not writable.
#[derive(Debug, Serialize, Deserialize)]
pub struct PreFlushRegion;

#[async_trait::async_trait]
#[typetag::serde]
impl State for PreFlushRegion {
    async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
        let timer = Instant::now();
        self.flush_region(ctx).await?;
        ctx.update_flush_leader_region_elapsed(timer);
        // We intentionally don't update `operations_elapsed` here to prevent
        // the `next_operation_timeout` from being reduced by the flush operation.
        // This ensures sufficient time for subsequent critical operations.

        Ok((
            Box::new(UpdateMetadata::Downgrade),
            Status::executing(false),
        ))
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}

impl PreFlushRegion {
    /// Builds flush leader region instruction.
    fn build_flush_leader_region_instruction(&self, ctx: &Context) -> Instruction {
        let pc = &ctx.persistent_ctx;
        let region_id = pc.region_id;
        Instruction::FlushRegion(region_id)
    }

    /// Tries to flush a leader region.
    ///
    /// Ignore:
    /// - [PusherNotFound](error::Error::PusherNotFound), The datanode is unreachable.
    /// - [PushMessage](error::Error::PushMessage), The receiver is dropped.
    /// - Failed to flush region on the Datanode.
    ///
    /// Abort:
    /// - [MailboxTimeout](error::Error::MailboxTimeout), Timeout.
    /// - [MailboxReceiver](error::Error::MailboxReceiver), The sender is dropped without sending (impossible).
    /// - [UnexpectedInstructionReply](error::Error::UnexpectedInstructionReply).
    /// - [ExceededDeadline](error::Error::ExceededDeadline)
    /// - Invalid JSON.
    async fn flush_region(&self, ctx: &mut Context) -> Result<()> {
        let operation_timeout =
            ctx.next_operation_timeout()
                .context(error::ExceededDeadlineSnafu {
                    operation: "Flush leader region",
                })?;
        let flush_instruction = self.build_flush_leader_region_instruction(ctx);
        let region_id = ctx.persistent_ctx.region_id;
        let leader = &ctx.persistent_ctx.from_peer;

        let msg = MailboxMessage::json_message(
            &format!("Flush leader region: {}", region_id),
            &format!("Metasrv@{}", ctx.server_addr()),
            &format!("Datanode-{}@{}", leader.id, leader.addr),
            common_time::util::current_time_millis(),
            &flush_instruction,
        )
        .with_context(|_| error::SerializeToJsonSnafu {
            input: flush_instruction.to_string(),
        })?;

        let ch = Channel::Datanode(leader.id);
        let now = Instant::now();
        let result = ctx.mailbox.send(&ch, msg, operation_timeout).await;

        match result {
            Ok(receiver) => match receiver.await? {
                Ok(msg) => {
                    let reply = HeartbeatMailbox::json_reply(&msg)?;
                    info!(
                        "Received flush leader region reply: {:?}, region: {}, elapsed: {:?}",
                        reply,
                        region_id,
                        now.elapsed()
                    );

                    let InstructionReply::FlushRegion(SimpleReply { result, error }) = reply else {
                        return error::UnexpectedInstructionReplySnafu {
                            mailbox_message: msg.to_string(),
                            reason: "expect flush region reply",
                        }
                        .fail();
                    };

                    if error.is_some() {
                        warn!(
                            "Failed to flush leader region {} on datanode {:?}, error: {:?}. Skip flush operation.",
                            region_id, leader, error
                        );
                    } else if result {
                        info!(
                            "The flush leader region {} on datanode {:?} is successful, elapsed: {:?}",
                            region_id,
                            leader,
                            now.elapsed()
                        );
                    }

                    Ok(())
                }
                Err(Error::MailboxTimeout { .. }) => error::ExceededDeadlineSnafu {
                    operation: "Flush leader region",
                }
                .fail(),
                Err(err) => Err(err),
            },
            Err(Error::PusherNotFound { .. }) => {
                warn!(
                    "Failed to flush leader region({}), the datanode({}) is unreachable(PusherNotFound). Skip flush operation.",
                    region_id,
                    leader
                );
                Ok(())
            }
            Err(err) => Err(err),
        }
    }
}

#[cfg(test)]
mod tests {
    use std::assert_matches::assert_matches;

    use store_api::storage::RegionId;

    use super::*;
    use crate::procedure::region_migration::test_util::{self, TestingEnv};
    use crate::procedure::region_migration::{ContextFactory, PersistentContext};
    use crate::procedure::test_util::{
        new_close_region_reply, new_flush_region_reply, send_mock_reply,
    };

    fn new_persistent_context() -> PersistentContext {
        test_util::new_persistent_context(1, 2, RegionId::new(1024, 1))
    }

    #[tokio::test]
    async fn test_datanode_is_unreachable() {
        let state = PreFlushRegion;
        // from_peer: 1
        // to_peer: 2
        let persistent_context = new_persistent_context();
        let env = TestingEnv::new();
        let mut ctx = env.context_factory().new_context(persistent_context);
        // Should be ok, if leader region is unreachable. it will skip flush operation.
        state.flush_region(&mut ctx).await.unwrap();
    }

    #[tokio::test]
    async fn test_unexpected_instruction_reply() {
        common_telemetry::init_default_ut_logging();
        let state = PreFlushRegion;
        // from_peer: 1
        // to_peer: 2
        let persistent_context = new_persistent_context();
        let from_peer_id = persistent_context.from_peer.id;
        let mut env = TestingEnv::new();
        let mut ctx = env.context_factory().new_context(persistent_context);
        let mailbox_ctx = env.mailbox_context();
        let mailbox = mailbox_ctx.mailbox().clone();
        let (tx, rx) = tokio::sync::mpsc::channel(1);
        mailbox_ctx
            .insert_heartbeat_response_receiver(Channel::Datanode(from_peer_id), tx)
            .await;
        // Sends an incorrect reply.
        send_mock_reply(mailbox, rx, |id| Ok(new_close_region_reply(id)));
        let err = state.flush_region(&mut ctx).await.unwrap_err();
        assert_matches!(err, Error::UnexpectedInstructionReply { .. });
        assert!(!err.is_retryable());
    }

    #[tokio::test]
    async fn test_instruction_exceeded_deadline() {
        let state = PreFlushRegion;
        // from_peer: 1
        // to_peer: 2
        let persistent_context = new_persistent_context();
        let from_peer_id = persistent_context.from_peer.id;
        let mut env = TestingEnv::new();
        let mut ctx = env.context_factory().new_context(persistent_context);
        let mailbox_ctx = env.mailbox_context();
        let mailbox = mailbox_ctx.mailbox().clone();
        let (tx, rx) = tokio::sync::mpsc::channel(1);
        mailbox_ctx
            .insert_heartbeat_response_receiver(Channel::Datanode(from_peer_id), tx)
            .await;
        // Sends an timeout error.
        send_mock_reply(mailbox, rx, |id| {
            Err(error::MailboxTimeoutSnafu { id }.build())
        });

        let err = state.flush_region(&mut ctx).await.unwrap_err();
        assert_matches!(err, Error::ExceededDeadline { .. });
        assert!(!err.is_retryable());
    }

    #[tokio::test]
    async fn test_flush_region_failed() {
        common_telemetry::init_default_ut_logging();
        let state = PreFlushRegion;
        // from_peer: 1
        // to_peer: 2
        let persistent_context = new_persistent_context();
        let from_peer_id = persistent_context.from_peer.id;
        let mut env = TestingEnv::new();
        let mut ctx = env.context_factory().new_context(persistent_context);
        let mailbox_ctx = env.mailbox_context();
        let mailbox = mailbox_ctx.mailbox().clone();
        let (tx, rx) = tokio::sync::mpsc::channel(1);
        mailbox_ctx
            .insert_heartbeat_response_receiver(Channel::Datanode(from_peer_id), tx)
            .await;
        send_mock_reply(mailbox, rx, |id| {
            Ok(new_flush_region_reply(
                id,
                false,
                Some("test mocked".to_string()),
            ))
        });
        // Should be ok, if flush leader region failed. it will skip flush operation.
        state.flush_region(&mut ctx).await.unwrap();
    }

    #[tokio::test]
    async fn test_next_update_metadata_downgrade_state() {
        common_telemetry::init_default_ut_logging();
        let mut state = PreFlushRegion;
        // from_peer: 1
        // to_peer: 2
        let persistent_context = new_persistent_context();
        let from_peer_id = persistent_context.from_peer.id;
        let mut env = TestingEnv::new();
        let mut ctx = env.context_factory().new_context(persistent_context);
        let mailbox_ctx = env.mailbox_context();
        let mailbox = mailbox_ctx.mailbox().clone();
        let (tx, rx) = tokio::sync::mpsc::channel(1);
        mailbox_ctx
            .insert_heartbeat_response_receiver(Channel::Datanode(from_peer_id), tx)
            .await;
        send_mock_reply(mailbox, rx, |id| Ok(new_flush_region_reply(id, true, None)));
        let (next, _) = state.next(&mut ctx).await.unwrap();

        let update_metadata = next.as_any().downcast_ref::<UpdateMetadata>().unwrap();
        assert_matches!(update_metadata, UpdateMetadata::Downgrade);
    }
}

@@ -28,7 +28,7 @@ use tokio::time::Instant;

use crate::error::{self, Result};
use crate::handler::HeartbeatMailbox;
use crate::procedure::region_migration::flush_leader_region::PreFlushRegion;
use crate::procedure::region_migration::update_metadata::UpdateMetadata;
use crate::procedure::region_migration::{Context, State};
use crate::service::mailbox::Channel;

@@ -47,7 +47,10 @@ impl State for OpenCandidateRegion {
        self.open_candidate_region(ctx, instruction).await?;
        ctx.update_open_candidate_region_elapsed(now);

        Ok((Box::new(PreFlushRegion), Status::executing(false)))
        Ok((
            Box::new(UpdateMetadata::Downgrade),
            Status::executing(false),
        ))
    }

    fn as_any(&self) -> &dyn Any {
@@ -396,7 +399,7 @@ mod tests {
    }

    #[tokio::test]
    async fn test_next_flush_leader_region_state() {
    async fn test_next_update_metadata_downgrade_state() {
        let mut state = Box::new(OpenCandidateRegion);
        // from_peer: 1
        // to_peer: 2
@@ -442,7 +445,8 @@ mod tests {
            (to_peer_id, region_id)
        );

        let flush_leader_region = next.as_any().downcast_ref::<PreFlushRegion>().unwrap();
        assert_matches!(flush_leader_region, PreFlushRegion);
        let update_metadata = next.as_any().downcast_ref::<UpdateMetadata>().unwrap();

        assert_matches!(update_metadata, UpdateMetadata::Downgrade);
    }
}

@@ -44,7 +44,6 @@ use crate::error::{self, Error, Result};
use crate::metasrv::MetasrvInfo;
use crate::procedure::region_migration::close_downgraded_region::CloseDowngradedRegion;
use crate::procedure::region_migration::downgrade_leader_region::DowngradeLeaderRegion;
use crate::procedure::region_migration::flush_leader_region::PreFlushRegion;
use crate::procedure::region_migration::manager::RegionMigrationProcedureTracker;
use crate::procedure::region_migration::migration_abort::RegionMigrationAbort;
use crate::procedure::region_migration::migration_end::RegionMigrationEnd;
@@ -416,11 +415,6 @@ pub(crate) fn assert_open_candidate_region(next: &dyn State) {
    let _ = next.as_any().downcast_ref::<OpenCandidateRegion>().unwrap();
}

/// Asserts the [State] should be [FlushLeaderRegion].
pub(crate) fn assert_flush_leader_region(next: &dyn State) {
    let _ = next.as_any().downcast_ref::<PreFlushRegion>().unwrap();
}

/// Asserts the [State] should be [UpdateMetadata::Downgrade].
pub(crate) fn assert_update_metadata_downgrade(next: &dyn State) {
    let state = next.as_any().downcast_ref::<UpdateMetadata>().unwrap();

@@ -101,24 +101,6 @@ pub fn new_open_region_reply(id: u64, result: bool, error: Option<String>) -> Ma
    }
}

/// Generates a [InstructionReply::FlushRegion] reply.
pub fn new_flush_region_reply(id: u64, result: bool, error: Option<String>) -> MailboxMessage {
    MailboxMessage {
        id,
        subject: "mock".to_string(),
        from: "datanode".to_string(),
        to: "meta".to_string(),
        timestamp_millis: current_time_millis(),
        payload: Some(Payload::Json(
            serde_json::to_string(&InstructionReply::FlushRegion(SimpleReply {
                result,
                error,
            }))
            .unwrap(),
        )),
    }
}

/// Generates a [InstructionReply::CloseRegion] reply.
pub fn new_close_region_reply(id: u64) -> MailboxMessage {
    MailboxMessage {

@@ -181,7 +181,7 @@ impl WalPruneProcedure {
        let peer_and_instructions = peer_region_ids_map
            .into_iter()
            .map(|(peer, region_ids)| {
                let flush_instruction = Instruction::FlushRegions(FlushRegions { region_ids });
                let flush_instruction = Instruction::FlushRegion(FlushRegions { region_ids });
                (peer.clone(), flush_instruction)
            })
            .collect();
@@ -536,7 +536,7 @@ mod tests {
        let msg = resp.mailbox_message.unwrap();
        let flush_instruction = HeartbeatMailbox::json_instruction(&msg).unwrap();
        let mut flush_requested_region_ids = match flush_instruction {
            Instruction::FlushRegions(FlushRegions { region_ids, .. }) => region_ids,
            Instruction::FlushRegion(FlushRegions { region_ids, .. }) => region_ids,
            _ => unreachable!(),
        };
        let sorted_region_ids = region_ids

@@ -22,20 +22,20 @@ use common_meta::datanode::Stat;
use common_meta::ddl::{DetectingRegion, RegionFailureDetectorController};
use common_meta::key::maintenance::MaintenanceModeManagerRef;
use common_meta::leadership_notifier::LeadershipChangeListener;
use common_meta::peer::{Peer, PeerLookupServiceRef};
use common_meta::peer::PeerLookupServiceRef;
use common_meta::DatanodeId;
use common_runtime::JoinHandle;
use common_telemetry::{debug, error, info, warn};
use common_time::util::current_time_millis;
use error::Error::{LeaderPeerChanged, MigrationRunning, TableRouteNotFound};
use snafu::{ensure, OptionExt, ResultExt};
use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionId;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio::time::{interval, MissedTickBehavior};

use crate::error::{self, Result};
use crate::failure_detector::PhiAccrualFailureDetectorOptions;
use crate::metasrv::{RegionStatAwareSelectorRef, SelectTarget, SelectorContext, SelectorRef};
use crate::metasrv::{SelectorContext, SelectorRef};
use crate::procedure::region_migration::manager::RegionMigrationManagerRef;
use crate::procedure::region_migration::{
    RegionMigrationProcedureTask, DEFAULT_REGION_MIGRATION_TIMEOUT,
@@ -203,12 +203,6 @@ pub type RegionSupervisorRef = Arc<RegionSupervisor>;
/// The default tick interval.
pub const DEFAULT_TICK_INTERVAL: Duration = Duration::from_secs(1);

/// Selector for region supervisor.
pub enum RegionSupervisorSelector {
    NaiveSelector(SelectorRef),
    RegionStatAwareSelector(RegionStatAwareSelectorRef),
}

/// The [`RegionSupervisor`] is used to detect Region failures
/// and initiate Region failover upon detection, ensuring uninterrupted region service.
pub struct RegionSupervisor {
@@ -221,7 +215,7 @@ pub struct RegionSupervisor {
    /// The context of [`SelectorRef`]
    selector_context: SelectorContext,
    /// Candidate node selector.
    selector: RegionSupervisorSelector,
    selector: SelectorRef,
    /// Region migration manager.
    region_migration_manager: RegionMigrationManagerRef,
    /// The maintenance mode manager.
@@ -294,7 +288,7 @@ impl RegionSupervisor {
        event_receiver: Receiver<Event>,
        options: PhiAccrualFailureDetectorOptions,
        selector_context: SelectorContext,
        selector: RegionSupervisorSelector,
        selector: SelectorRef,
        region_migration_manager: RegionMigrationManagerRef,
        maintenance_mode_manager: MaintenanceModeManagerRef,
        peer_lookup: PeerLookupServiceRef,
@@ -368,7 +362,6 @@ impl RegionSupervisor {
            }
        }

        // Extracts regions that are migrating(failover), which means they are already being triggered failover.
        let migrating_regions = regions
            .extract_if(.., |(_, region_id)| {
                self.region_migration_manager.tracker().contains(*region_id)
@@ -381,43 +374,10 @@ impl RegionSupervisor {
            );
        }

        if regions.is_empty() {
            // If all detected regions are failover or migrating, just return.
            return;
        }

        let mut grouped_regions: HashMap<u64, Vec<RegionId>> =
            HashMap::with_capacity(regions.len());
        warn!("Detects region failures: {:?}", regions);
        for (datanode_id, region_id) in regions {
            grouped_regions
                .entry(datanode_id)
                .or_default()
                .push(region_id);
        }

        for (datanode_id, regions) in grouped_regions {
            warn!(
                "Detects region failures on datanode: {}, regions: {:?}",
                datanode_id, regions
            );
            // We can't use `grouped_regions.keys().cloned().collect::<Vec<_>>()` here
            // because there may be false positives in failure detection on the datanode.
            // So we only consider the datanode that reports the failure.
            let failed_datanodes = [datanode_id];
            match self
                .generate_failover_tasks(datanode_id, &regions, &failed_datanodes)
                .await
            {
                Ok(tasks) => {
                    for (task, count) in tasks {
                        let region_id = task.region_id;
                        let datanode_id = task.from_peer.id;
                        if let Err(err) = self.do_failover(task, count).await {
                            error!(err; "Failed to execute region failover for region: {}, datanode: {}", region_id, datanode_id);
                        }
                    }
                }
                Err(err) => error!(err; "Failed to generate failover tasks"),
            if let Err(err) = self.do_failover(datanode_id, region_id).await {
                error!(err; "Failed to execute region failover for region: {region_id}, datanode: {datanode_id}");
            }
        }
    }
@@ -429,107 +389,49 @@ impl RegionSupervisor {
            .context(error::MaintenanceModeManagerSnafu)
    }

    async fn select_peers(
        &self,
        from_peer_id: DatanodeId,
        regions: &[RegionId],
        failure_datanodes: &[DatanodeId],
    ) -> Result<Vec<(RegionId, Peer)>> {
        let exclude_peer_ids = HashSet::from_iter(failure_datanodes.iter().cloned());
        match &self.selector {
            RegionSupervisorSelector::NaiveSelector(selector) => {
                let opt = SelectorOptions {
                    min_required_items: regions.len(),
                    allow_duplication: true,
                    exclude_peer_ids,
                };
                let peers = selector.select(&self.selector_context, opt).await?;
                ensure!(
                    peers.len() == regions.len(),
                    error::NoEnoughAvailableNodeSnafu {
                        required: regions.len(),
                        available: peers.len(),
                        select_target: SelectTarget::Datanode,
                    }
                );
                let region_peers = regions
                    .iter()
                    .zip(peers)
                    .map(|(region_id, peer)| (*region_id, peer))
                    .collect::<Vec<_>>();

                Ok(region_peers)
            }
            RegionSupervisorSelector::RegionStatAwareSelector(selector) => {
                let peers = selector
                    .select(
                        &self.selector_context,
                        from_peer_id,
                        regions,
                        exclude_peer_ids,
                    )
                    .await?;
                ensure!(
                    peers.len() == regions.len(),
                    error::NoEnoughAvailableNodeSnafu {
                        required: regions.len(),
                        available: peers.len(),
                        select_target: SelectTarget::Datanode,
                    }
                );

                Ok(peers)
            }
        }
    }

    async fn generate_failover_tasks(
        &mut self,
        from_peer_id: DatanodeId,
        regions: &[RegionId],
        failed_datanodes: &[DatanodeId],
    ) -> Result<Vec<(RegionMigrationProcedureTask, u32)>> {
        let mut tasks = Vec::with_capacity(regions.len());
    async fn do_failover(&mut self, datanode_id: DatanodeId, region_id: RegionId) -> Result<()> {
        let count = *self
            .failover_counts
            .entry((datanode_id, region_id))
            .and_modify(|count| *count += 1)
            .or_insert(1);
        let from_peer = self
            .peer_lookup
            .datanode(from_peer_id)
            .datanode(datanode_id)
            .await
            .context(error::LookupPeerSnafu {
                peer_id: from_peer_id,
                peer_id: datanode_id,
            })?
            .context(error::PeerUnavailableSnafu {
                peer_id: from_peer_id,
                peer_id: datanode_id,
            })?;
        let region_peers = self
            .select_peers(from_peer_id, regions, failed_datanodes)
        let mut peers = self
            .selector
            .select(
                &self.selector_context,
                SelectorOptions {
                    min_required_items: 1,
                    allow_duplication: false,
                    exclude_peer_ids: HashSet::from([from_peer.id]),
                },
            )
            .await?;

        for (region_id, peer) in region_peers {
            let count = *self
                .failover_counts
                .entry((from_peer_id, region_id))
                .and_modify(|count| *count += 1)
                .or_insert(1);
            let task = RegionMigrationProcedureTask {
                region_id,
                from_peer: from_peer.clone(),
                to_peer: peer,
                timeout: DEFAULT_REGION_MIGRATION_TIMEOUT * count,
            };
            tasks.push((task, count));
        let to_peer = peers.remove(0);
        if to_peer.id == from_peer.id {
            warn!(
                "Skip failover for region: {region_id}, from_peer: {from_peer}, trying to failover to the same peer."
            );
            return Ok(());
        }

        Ok(tasks)
    }

    async fn do_failover(&mut self, task: RegionMigrationProcedureTask, count: u32) -> Result<()> {
        let from_peer_id = task.from_peer.id;
        let region_id = task.region_id;

        info!(
            "Failover for region: {}, from_peer: {}, to_peer: {}, timeout: {:?}, tries: {}",
            task.region_id, task.from_peer, task.to_peer, task.timeout, count
            "Failover for region: {region_id}, from_peer: {from_peer}, to_peer: {to_peer}, tries: {count}"
        );
        let task = RegionMigrationProcedureTask {
            region_id,
            from_peer,
            to_peer,
            timeout: DEFAULT_REGION_MIGRATION_TIMEOUT * count,
        };

        if let Err(err) = self.region_migration_manager.submit_procedure(task).await {
            return match err {
@@ -537,25 +439,25 @@ impl RegionSupervisor {
                MigrationRunning { .. } => {
                    info!(
                        "Another region migration is running, skip failover for region: {}, datanode: {}",
                        region_id, from_peer_id
                        region_id, datanode_id
                    );
                    Ok(())
                }
                TableRouteNotFound { .. } => {
                    self.deregister_failure_detectors(vec![(from_peer_id, region_id)])
                    self.deregister_failure_detectors(vec![(datanode_id, region_id)])
                        .await;
                    info!(
                        "Table route is not found, the table is dropped, removed failover detector for region: {}, datanode: {}",
                        region_id, from_peer_id
                        region_id, datanode_id
                    );
                    Ok(())
                }
                LeaderPeerChanged { .. } => {
                    self.deregister_failure_detectors(vec![(from_peer_id, region_id)])
                    self.deregister_failure_detectors(vec![(datanode_id, region_id)])
                        .await;
                    info!(
                        "Region's leader peer changed, removed failover detector for region: {}, datanode: {}",
                        region_id, from_peer_id
                        region_id, datanode_id
                    );
                    Ok(())
                }
@@ -619,7 +521,6 @@ pub(crate) mod tests {
    use tokio::sync::oneshot;
    use tokio::time::sleep;

    use super::RegionSupervisorSelector;
    use crate::procedure::region_migration::manager::RegionMigrationManager;
    use crate::procedure::region_migration::test_util::TestingEnv;
    use crate::region::supervisor::{
@@ -647,7 +548,7 @@ pub(crate) mod tests {
        rx,
        Default::default(),
        selector_context,
        RegionSupervisorSelector::NaiveSelector(selector),
        selector,
        region_migration_manager,
        maintenance_mode_manager,
        peer_lookup,

@@ -23,7 +23,6 @@ pub mod weighted_choose;
use std::collections::HashSet;

use serde::{Deserialize, Serialize};
use store_api::storage::RegionId;
use strum::AsRefStr;

use crate::error;
@@ -37,24 +36,6 @@ pub trait Selector: Send + Sync {
    async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Self::Output>;
}

/// A selector that aware of region statistics
///
/// It selects the best destination peer for a list of regions.
/// The selection is based on the region statistics, such as the region leader's write throughput.
#[async_trait::async_trait]
pub trait RegionStatAwareSelector: Send + Sync {
    type Context;
    type Output;

    async fn select(
        &self,
        ctx: &Self::Context,
        from_peer_id: u64,
        region_ids: &[RegionId],
        exclude_peer_ids: HashSet<u64>,
    ) -> Result<Self::Output>;
}

#[derive(Debug)]
pub struct SelectorOptions {
    /// Minimum number of selected results.

@@ -42,11 +42,11 @@ pub(crate) use state::MetricEngineState;
use store_api::metadata::RegionMetadataRef;
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
use store_api::region_engine::{
    BatchResponses, RegionEngine, RegionManifestInfo, RegionRole, RegionScannerRef,
    RegionStatistic, SetRegionRoleStateResponse, SetRegionRoleStateSuccess,
    SettableRegionRoleState, SyncManifestResponse,
    RegionEngine, RegionManifestInfo, RegionRole, RegionScannerRef, RegionStatistic,
    SetRegionRoleStateResponse, SetRegionRoleStateSuccess, SettableRegionRoleState,
    SyncManifestResponse,
};
use store_api::region_request::{BatchRegionDdlRequest, RegionOpenRequest, RegionRequest};
use store_api::region_request::{BatchRegionDdlRequest, RegionRequest};
use store_api::storage::{RegionId, ScanRequest, SequenceNumber};

use crate::config::EngineConfig;
@@ -131,17 +131,6 @@ impl RegionEngine for MetricEngine {
        METRIC_ENGINE_NAME
    }

    async fn handle_batch_open_requests(
        &self,
        parallelism: usize,
        requests: Vec<(RegionId, RegionOpenRequest)>,
    ) -> Result<BatchResponses, BoxedError> {
        self.inner
            .handle_batch_open_requests(parallelism, requests)
            .await
            .map_err(BoxedError::new)
    }

    async fn handle_batch_ddl_requests(
        &self,
        batch_request: BatchRegionDdlRequest,

@@ -14,80 +14,24 @@

//! Open a metric region.

use std::collections::HashSet;

use common_telemetry::info;
use mito2::engine::MITO_ENGINE_NAME;
use object_store::util::join_dir;
use snafu::{OptionExt, ResultExt};
use store_api::codec::PrimaryKeyEncoding;
use store_api::metric_engine_consts::{DATA_REGION_SUBDIR, METADATA_REGION_SUBDIR};
use store_api::region_engine::{BatchResponses, RegionEngine};
use store_api::region_engine::RegionEngine;
use store_api::region_request::{AffectedRows, RegionOpenRequest, RegionRequest};
use store_api::storage::RegionId;

use crate::engine::create::region_options_for_metadata_region;
use crate::engine::options::{set_data_region_options, PhysicalRegionOptions};
use crate::engine::MetricEngineInner;
use crate::error::{
    BatchOpenMitoRegionSnafu, OpenMitoRegionSnafu, PhysicalRegionNotFoundSnafu, Result,
};
use crate::error::{OpenMitoRegionSnafu, PhysicalRegionNotFoundSnafu, Result};
use crate::metrics::{LOGICAL_REGION_COUNT, PHYSICAL_REGION_COUNT};
use crate::utils;

impl MetricEngineInner {
    pub async fn handle_batch_open_requests(
        &self,
        parallelism: usize,
        requests: Vec<(RegionId, RegionOpenRequest)>,
    ) -> Result<BatchResponses> {
        // We need to open metadata region and data region for each request.
        let mut all_requests = Vec::with_capacity(requests.len() * 2);
        let mut physical_region_ids = Vec::with_capacity(requests.len());
        let mut data_region_ids = HashSet::with_capacity(requests.len());

        for (region_id, request) in requests {
            if !request.is_physical_table() {
                continue;
            }
            let physical_region_options = PhysicalRegionOptions::try_from(&request.options)?;
            let metadata_region_id = utils::to_metadata_region_id(region_id);
            let data_region_id = utils::to_data_region_id(region_id);
            let (open_metadata_region_request, open_data_region_request) =
                self.transform_open_physical_region_request(request);
            all_requests.push((metadata_region_id, open_metadata_region_request));
            all_requests.push((data_region_id, open_data_region_request));
            physical_region_ids.push((region_id, physical_region_options));
            data_region_ids.insert(data_region_id);
        }

        let results = self
            .mito
            .handle_batch_open_requests(parallelism, all_requests)
            .await
            .context(BatchOpenMitoRegionSnafu {})?
            .into_iter()
            .filter(|(region_id, _)| data_region_ids.contains(region_id))
            .collect::<Vec<_>>();

        for (physical_region_id, physical_region_options) in physical_region_ids {
            let primary_key_encoding = self
                .mito
                .get_primary_key_encoding(physical_region_id)
                .context(PhysicalRegionNotFoundSnafu {
                    region_id: physical_region_id,
                })?;
            self.recover_states(
                physical_region_id,
                primary_key_encoding,
                physical_region_options,
            )
            .await?;
        }

        Ok(results)
    }

    /// Open a metric region.
    ///
    /// Only open requests to a physical region matter. Those to logical regions are
@@ -125,15 +69,12 @@ impl MetricEngineInner {
        }
    }

    /// Transform the open request to open metadata region and data region.
    ///
    /// Returns:
    /// - The open request for metadata region.
    /// - The open request for data region.
    fn transform_open_physical_region_request(
    /// Invokes mito engine to open physical regions (data and metadata).
    async fn open_physical_region(
        &self,
        region_id: RegionId,
        request: RegionOpenRequest,
    ) -> (RegionOpenRequest, RegionOpenRequest) {
    ) -> Result<AffectedRows> {
        let metadata_region_dir = join_dir(&request.region_dir, METADATA_REGION_SUBDIR);
        let data_region_dir = join_dir(&request.region_dir, DATA_REGION_SUBDIR);

@@ -157,19 +98,8 @@ impl MetricEngineInner {
            skip_wal_replay: request.skip_wal_replay,
        };

        (open_metadata_region_request, open_data_region_request)
    }

    /// Invokes mito engine to open physical regions (data and metadata).
    async fn open_physical_region(
        &self,
        region_id: RegionId,
        request: RegionOpenRequest,
    ) -> Result<AffectedRows> {
        let metadata_region_id = utils::to_metadata_region_id(region_id);
        let data_region_id = utils::to_data_region_id(region_id);
        let (open_metadata_region_request, open_data_region_request) =
            self.transform_open_physical_region_request(request);

        self.mito
            .handle_request(

@@ -42,13 +42,6 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Failed to batch open mito region"))]
    BatchOpenMitoRegion {
        source: BoxedError,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to close mito region, region id: {}", region_id))]
    CloseMitoRegion {
        region_id: RegionId,
@@ -344,8 +337,7 @@ impl ErrorExt for Error {
            | MitoCatchupOperation { source, .. }
            | MitoFlushOperation { source, .. }
            | MitoDeleteOperation { source, .. }
            | MitoSyncOperation { source, .. }
            | BatchOpenMitoRegion { source, .. } => source.status_code(),
            | MitoSyncOperation { source, .. } => source.status_code(),

            EncodePrimaryKey { source, .. } => source.status_code(),

@@ -710,8 +710,8 @@ pub enum Error {
|
||||
error: std::io::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to filter record batch"))]
|
||||
FilterRecordBatch {
|
||||
#[snafu(display("Record batch error"))]
|
||||
RecordBatch {
|
||||
source: common_recordbatch::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
@@ -1032,6 +1032,20 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to scan series"))]
|
||||
ScanSeries {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: Arc<Error>,
|
||||
},
|
||||
|
||||
#[snafu(display("Partition {} scan multiple times", partition))]
|
||||
ScanMultiTimes {
|
||||
partition: usize,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T, E = Error> = std::result::Result<T, E>;
|
||||
@@ -1154,7 +1168,7 @@ impl ErrorExt for Error {
|
||||
|
||||
External { source, .. } => source.status_code(),
|
||||
|
||||
FilterRecordBatch { source, .. } => source.status_code(),
|
||||
RecordBatch { source, .. } => source.status_code(),
|
||||
|
||||
Download { .. } | Upload { .. } => StatusCode::StorageUnavailable,
|
||||
ChecksumMismatch { .. } => StatusCode::Unexpected,
|
||||
@@ -1183,7 +1197,12 @@ impl ErrorExt for Error {
|
||||
ManualCompactionOverride {} => StatusCode::Cancelled,
|
||||
|
||||
IncompatibleWalProviderChange { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
ConvertDataType { .. } => StatusCode::Internal,
|
||||
|
||||
ScanSeries { source, .. } => source.status_code(),
|
||||
|
||||
ScanMultiTimes { .. } => StatusCode::InvalidArguments,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -302,10 +302,7 @@ impl PartitionTreeMemtable {
|
||||
fn update_stats(&self, metrics: &WriteMetrics) {
|
||||
// Only let the tracker tracks value bytes.
|
||||
self.alloc_tracker.on_allocation(metrics.value_bytes);
|
||||
self.max_timestamp
|
||||
.fetch_max(metrics.max_ts, Ordering::SeqCst);
|
||||
self.min_timestamp
|
||||
.fetch_min(metrics.min_ts, Ordering::SeqCst);
|
||||
metrics.update_timestamp_range(&self.max_timestamp, &self.min_timestamp);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -14,6 +14,8 @@
|
||||
|
||||
//! Internal metrics of the memtable.
|
||||
|
||||
use std::sync::atomic::{AtomicI64, Ordering};
|
||||
|
||||
/// Metrics of writing memtables.
|
||||
pub(crate) struct WriteMetrics {
|
||||
/// Size allocated by keys.
|
||||
@@ -26,6 +28,51 @@ pub(crate) struct WriteMetrics {
|
||||
pub(crate) max_ts: i64,
|
||||
}
|
||||
|
||||
impl WriteMetrics {
|
||||
/// Update the min/max timestamp range according to current write metric.
|
||||
pub(crate) fn update_timestamp_range(&self, prev_max_ts: &AtomicI64, prev_min_ts: &AtomicI64) {
|
||||
loop {
|
||||
let current_min = prev_min_ts.load(Ordering::Relaxed);
|
||||
if self.min_ts >= current_min {
|
||||
break;
|
||||
}
|
||||
|
||||
let Err(updated) = prev_min_ts.compare_exchange(
|
||||
current_min,
|
||||
self.min_ts,
|
||||
Ordering::Relaxed,
|
||||
Ordering::Relaxed,
|
||||
) else {
|
||||
break;
|
||||
};
|
||||
|
||||
if updated == self.min_ts {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
loop {
|
||||
let current_max = prev_max_ts.load(Ordering::Relaxed);
|
||||
if self.max_ts <= current_max {
|
||||
break;
|
||||
}
|
||||
|
||||
let Err(updated) = prev_max_ts.compare_exchange(
|
||||
current_max,
|
||||
self.max_ts,
|
||||
Ordering::Relaxed,
|
||||
Ordering::Relaxed,
|
||||
) else {
|
||||
break;
|
||||
};
|
||||
|
||||
if updated == self.max_ts {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
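The compare-exchange loops above only move the shared min/max outward and retry when another writer races in. A minimal standalone sketch of the same pattern (not the crate's `WriteMetrics` API; names here are illustrative only):

use std::sync::atomic::{AtomicI64, Ordering};

/// Folds one observed timestamp into a shared min/max with the same CAS-retry pattern.
fn observe_timestamp(ts: i64, min: &AtomicI64, max: &AtomicI64) {
    let mut cur = min.load(Ordering::Relaxed);
    // Retry until `min` is already no larger than `ts` or our update lands.
    while ts < cur {
        match min.compare_exchange(cur, ts, Ordering::Relaxed, Ordering::Relaxed) {
            Ok(_) => break,
            Err(actual) => cur = actual,
        }
    }
    let mut cur = max.load(Ordering::Relaxed);
    while ts > cur {
        match max.compare_exchange(cur, ts, Ordering::Relaxed, Ordering::Relaxed) {
            Ok(_) => break,
            Err(actual) => cur = actual,
        }
    }
}

fn main() {
    let (min, max) = (AtomicI64::new(i64::MAX), AtomicI64::new(i64::MIN));
    for ts in [5, -3, 10] {
        observe_timestamp(ts, &min, &max);
    }
    assert_eq!(min.load(Ordering::Relaxed), -3);
    assert_eq!(max.load(Ordering::Relaxed), 10);
}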
|
||||
|
||||
impl Default for WriteMetrics {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
|
||||
@@ -147,8 +147,7 @@ impl TimeSeriesMemtable {
|
||||
fn update_stats(&self, stats: WriteMetrics) {
|
||||
self.alloc_tracker
|
||||
.on_allocation(stats.key_bytes + stats.value_bytes);
|
||||
self.max_timestamp.fetch_max(stats.max_ts, Ordering::SeqCst);
|
||||
self.min_timestamp.fetch_min(stats.min_ts, Ordering::SeqCst);
|
||||
stats.update_timestamp_range(&self.max_timestamp, &self.min_timestamp);
|
||||
}
|
||||
|
||||
fn write_key_value(&self, kv: KeyValue, stats: &mut WriteMetrics) -> Result<()> {
|
||||
|
||||
@@ -24,6 +24,7 @@ pub(crate) mod range;
|
||||
pub(crate) mod scan_region;
|
||||
pub(crate) mod scan_util;
|
||||
pub(crate) mod seq_scan;
|
||||
pub(crate) mod series_scan;
|
||||
pub(crate) mod unordered_scan;
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
@@ -21,7 +21,7 @@ use datatypes::arrow::array::BooleanArray;
|
||||
use datatypes::arrow::buffer::BooleanBuffer;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{FilterRecordBatchSnafu, Result};
|
||||
use crate::error::{RecordBatchSnafu, Result};
|
||||
use crate::memtable::BoxedBatchIterator;
|
||||
use crate::read::last_row::RowGroupLastRowCachedReader;
|
||||
use crate::read::{Batch, BatchReader};
|
||||
@@ -201,7 +201,7 @@ impl PruneTimeIterator {
|
||||
for filter in filters.iter() {
|
||||
let result = filter
|
||||
.evaluate_vector(batch.timestamps())
|
||||
.context(FilterRecordBatchSnafu)?;
|
||||
.context(RecordBatchSnafu)?;
|
||||
mask = mask.bitand(&result);
|
||||
}
|
||||
|
||||
|
||||
@@ -46,6 +46,7 @@ use crate::read::compat::{self, CompatBatch};
|
||||
use crate::read::projection::ProjectionMapper;
|
||||
use crate::read::range::{FileRangeBuilder, MemRangeBuilder, RangeMeta, RowGroupIndex};
|
||||
use crate::read::seq_scan::SeqScan;
|
||||
use crate::read::series_scan::SeriesScan;
|
||||
use crate::read::unordered_scan::UnorderedScan;
|
||||
use crate::read::{Batch, Source};
|
||||
use crate::region::options::MergeMode;
|
||||
@@ -66,6 +67,8 @@ pub(crate) enum Scanner {
|
||||
Seq(SeqScan),
|
||||
/// Unordered scan.
|
||||
Unordered(UnorderedScan),
|
||||
/// Per-series scan.
|
||||
Series(SeriesScan),
|
||||
}
|
||||
|
||||
impl Scanner {
|
||||
@@ -75,6 +78,7 @@ impl Scanner {
|
||||
match self {
|
||||
Scanner::Seq(seq_scan) => seq_scan.build_stream(),
|
||||
Scanner::Unordered(unordered_scan) => unordered_scan.build_stream().await,
|
||||
Scanner::Series(series_scan) => series_scan.build_stream().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -86,6 +90,7 @@ impl Scanner {
|
||||
match self {
|
||||
Scanner::Seq(seq_scan) => seq_scan.input().num_files(),
|
||||
Scanner::Unordered(unordered_scan) => unordered_scan.input().num_files(),
|
||||
Scanner::Series(series_scan) => series_scan.input().num_files(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -94,6 +99,7 @@ impl Scanner {
|
||||
match self {
|
||||
Scanner::Seq(seq_scan) => seq_scan.input().num_memtables(),
|
||||
Scanner::Unordered(unordered_scan) => unordered_scan.input().num_memtables(),
|
||||
Scanner::Series(series_scan) => series_scan.input().num_memtables(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -102,6 +108,7 @@ impl Scanner {
|
||||
match self {
|
||||
Scanner::Seq(seq_scan) => seq_scan.input().file_ids(),
|
||||
Scanner::Unordered(unordered_scan) => unordered_scan.input().file_ids(),
|
||||
Scanner::Series(series_scan) => series_scan.input().file_ids(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -113,6 +120,7 @@ impl Scanner {
|
||||
match self {
|
||||
Scanner::Seq(seq_scan) => seq_scan.prepare(request).unwrap(),
|
||||
Scanner::Unordered(unordered_scan) => unordered_scan.prepare(request).unwrap(),
|
||||
Scanner::Series(series_scan) => series_scan.prepare(request).unwrap(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -248,7 +256,9 @@ impl ScanRegion {
|
||||
|
||||
/// Returns a [Scanner] to scan the region.
|
||||
pub(crate) fn scanner(self) -> Result<Scanner> {
|
||||
if self.use_unordered_scan() {
|
||||
if self.use_series_scan() {
|
||||
self.series_scan().map(Scanner::Series)
|
||||
} else if self.use_unordered_scan() {
|
||||
// If table is append only and there is no series row selector, we use unordered scan in query.
|
||||
// We still use seq scan in compaction.
|
||||
self.unordered_scan().map(Scanner::Unordered)
|
||||
@@ -260,7 +270,9 @@ impl ScanRegion {
|
||||
/// Returns a [RegionScanner] to scan the region.
|
||||
#[tracing::instrument(level = tracing::Level::DEBUG, skip_all)]
|
||||
pub(crate) fn region_scanner(self) -> Result<RegionScannerRef> {
|
||||
if self.use_unordered_scan() {
|
||||
if self.use_series_scan() {
|
||||
self.series_scan().map(|scanner| Box::new(scanner) as _)
|
||||
} else if self.use_unordered_scan() {
|
||||
self.unordered_scan().map(|scanner| Box::new(scanner) as _)
|
||||
} else {
|
||||
self.seq_scan().map(|scanner| Box::new(scanner) as _)
|
||||
@@ -279,6 +291,12 @@ impl ScanRegion {
|
||||
Ok(UnorderedScan::new(input))
|
||||
}
|
||||
|
||||
/// Scans by series.
|
||||
pub(crate) fn series_scan(self) -> Result<SeriesScan> {
|
||||
let input = self.scan_input(true)?;
|
||||
Ok(SeriesScan::new(input))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn scan_without_filter_deleted(self) -> Result<SeqScan> {
|
||||
let input = self.scan_input(false)?;
|
||||
@@ -299,6 +317,11 @@ impl ScanRegion {
|
||||
|| self.request.distribution == Some(TimeSeriesDistribution::TimeWindowed))
|
||||
}
|
||||
|
||||
/// Returns true if the region can use series scan for current request.
|
||||
fn use_series_scan(&self) -> bool {
|
||||
self.request.distribution == Some(TimeSeriesDistribution::PerSeries)
|
||||
}
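For orientation, series scan is selected purely by the requested distribution. A hedged sketch of how a caller might opt in, assuming the request type (`ScanRequest` here) exposes the `distribution` field used above and implements `Default`, which is not shown in this diff:

let request = ScanRequest {
    distribution: Some(TimeSeriesDistribution::PerSeries),
    ..Default::default()
};
// ScanRegion::scanner(..) would then pick Scanner::Series for this request.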
|
||||
|
||||
/// Creates a scan input.
|
||||
fn scan_input(mut self, filter_deleted: bool) -> Result<ScanInput> {
|
||||
let time_range = self.build_time_range_predicate();
|
||||
@@ -322,10 +345,13 @@ impl ScanRegion {
|
||||
let memtables: Vec<_> = memtables
|
||||
.into_iter()
|
||||
.filter(|mem| {
|
||||
// check if memtable is empty by reading stats.
|
||||
let Some((start, end)) = mem.stats().time_range() else {
|
||||
if mem.is_empty() {
|
||||
return false;
|
||||
};
|
||||
}
|
||||
let stats = mem.stats();
|
||||
// Safety: the memtable is not empty.
|
||||
let (start, end) = stats.time_range().unwrap();
|
||||
|
||||
// The time range of the memtable is inclusive.
|
||||
let memtable_range = TimestampRange::new_inclusive(Some(start), Some(end));
|
||||
memtable_range.intersects(&time_range)
|
||||
|
||||
@@ -92,6 +92,8 @@ struct ScanMetricsSet {
|
||||
|
||||
/// Elapsed time before the first poll operation.
|
||||
first_poll: Duration,
|
||||
/// Number of send timeouts in SeriesScan.
|
||||
num_series_send_timeout: usize,
|
||||
}
|
||||
|
||||
impl fmt::Debug for ScanMetricsSet {
|
||||
@@ -122,6 +124,7 @@ impl fmt::Debug for ScanMetricsSet {
|
||||
num_sst_batches,
|
||||
num_sst_rows,
|
||||
first_poll,
|
||||
num_series_send_timeout,
|
||||
} = self;
|
||||
|
||||
write!(
|
||||
@@ -150,7 +153,8 @@ impl fmt::Debug for ScanMetricsSet {
|
||||
num_sst_record_batches={num_sst_record_batches}, \
|
||||
num_sst_batches={num_sst_batches}, \
|
||||
num_sst_rows={num_sst_rows}, \
|
||||
first_poll={first_poll:?}}}"
|
||||
first_poll={first_poll:?}, \
|
||||
num_series_send_timeout={num_series_send_timeout}}}"
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -439,6 +443,12 @@ impl PartitionMetrics {
|
||||
pub(crate) fn on_finish(&self) {
|
||||
self.0.on_finish();
|
||||
}
|
||||
|
||||
/// Sets the `num_series_send_timeout`.
|
||||
pub(crate) fn set_num_series_send_timeout(&self, num_timeout: usize) {
|
||||
let mut metrics = self.0.metrics.lock().unwrap();
|
||||
metrics.num_series_send_timeout = num_timeout;
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for PartitionMetrics {
|
||||
|
||||
@@ -30,7 +30,7 @@ use datatypes::schema::SchemaRef;
|
||||
use snafu::ResultExt;
|
||||
use store_api::metadata::RegionMetadataRef;
|
||||
use store_api::region_engine::{PartitionRange, PrepareRequest, RegionScanner, ScannerProperties};
|
||||
use store_api::storage::{TimeSeriesDistribution, TimeSeriesRowSelector};
|
||||
use store_api::storage::TimeSeriesRowSelector;
|
||||
use tokio::sync::Semaphore;
|
||||
|
||||
use crate::error::{PartitionOutOfRangeSnafu, Result};
|
||||
@@ -149,7 +149,7 @@ impl SeqScan {
|
||||
/// Builds a reader to read sources. If `semaphore` is provided, reads sources in parallel
|
||||
/// if possible.
|
||||
#[tracing::instrument(level = tracing::Level::DEBUG, skip_all)]
|
||||
async fn build_reader_from_sources(
|
||||
pub(crate) async fn build_reader_from_sources(
|
||||
stream_ctx: &StreamContext,
|
||||
mut sources: Vec<Source>,
|
||||
semaphore: Option<Arc<Semaphore>>,
|
||||
@@ -206,9 +206,13 @@ impl SeqScan {
|
||||
.build(),
|
||||
));
|
||||
}
|
||||
|
||||
if self.stream_ctx.input.distribution == Some(TimeSeriesDistribution::PerSeries) {
|
||||
return self.scan_partition_by_series(metrics_set, partition);
|
||||
if self.properties.partitions[partition].is_empty() {
|
||||
return Ok(Box::pin(RecordBatchStreamWrapper::new(
|
||||
self.stream_ctx.input.mapper.output_schema(),
|
||||
common_recordbatch::EmptyRecordBatchStream::new(
|
||||
self.stream_ctx.input.mapper.output_schema(),
|
||||
),
|
||||
)));
|
||||
}
|
||||
|
||||
let stream_ctx = self.stream_ctx.clone();
|
||||
@@ -237,14 +241,14 @@ impl SeqScan {
|
||||
&mut sources,
|
||||
);
|
||||
|
||||
let mut metrics = ScannerMetrics::default();
|
||||
let mut fetch_start = Instant::now();
|
||||
let mut reader =
|
||||
Self::build_reader_from_sources(&stream_ctx, sources, semaphore.clone())
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
let cache = &stream_ctx.input.cache_strategy;
|
||||
let mut metrics = ScannerMetrics::default();
|
||||
let mut fetch_start = Instant::now();
|
||||
#[cfg(debug_assertions)]
|
||||
let mut checker = crate::read::BatchChecker::default()
|
||||
.with_start(Some(part_range.start))
|
||||
@@ -307,97 +311,6 @@ impl SeqScan {
|
||||
Ok(stream)
|
||||
}
|
||||
|
||||
/// Scans all ranges in the given partition and merges them by time series.
/// Otherwise the returned stream might not contain any data.
|
||||
fn scan_partition_by_series(
|
||||
&self,
|
||||
metrics_set: &ExecutionPlanMetricsSet,
|
||||
partition: usize,
|
||||
) -> Result<SendableRecordBatchStream, BoxedError> {
|
||||
let stream_ctx = self.stream_ctx.clone();
|
||||
let semaphore = self.new_semaphore();
|
||||
let partition_ranges = self.properties.partitions[partition].clone();
|
||||
let distinguish_range = self.properties.distinguish_partition_range;
|
||||
let part_metrics = self.new_partition_metrics(metrics_set, partition);
|
||||
debug_assert!(!self.compaction);
|
||||
|
||||
let stream = try_stream! {
|
||||
part_metrics.on_first_poll();
|
||||
|
||||
let range_builder_list = Arc::new(RangeBuilderList::new(
|
||||
stream_ctx.input.num_memtables(),
|
||||
stream_ctx.input.num_files(),
|
||||
));
|
||||
// Scans all parts.
|
||||
let mut sources = Vec::with_capacity(partition_ranges.len());
|
||||
for part_range in partition_ranges {
|
||||
build_sources(
|
||||
&stream_ctx,
|
||||
&part_range,
|
||||
false,
|
||||
&part_metrics,
|
||||
range_builder_list.clone(),
|
||||
&mut sources,
|
||||
);
|
||||
}
|
||||
|
||||
// Builds a reader that merges sources from all parts.
|
||||
let mut reader =
|
||||
Self::build_reader_from_sources(&stream_ctx, sources, semaphore.clone())
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
let cache = &stream_ctx.input.cache_strategy;
|
||||
let mut metrics = ScannerMetrics::default();
|
||||
let mut fetch_start = Instant::now();
|
||||
|
||||
while let Some(batch) = reader
|
||||
.next_batch()
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?
|
||||
{
|
||||
metrics.scan_cost += fetch_start.elapsed();
|
||||
metrics.num_batches += 1;
|
||||
metrics.num_rows += batch.num_rows();
|
||||
|
||||
debug_assert!(!batch.is_empty());
|
||||
if batch.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let convert_start = Instant::now();
|
||||
let record_batch = stream_ctx.input.mapper.convert(&batch, cache)?;
|
||||
metrics.convert_cost += convert_start.elapsed();
|
||||
let yield_start = Instant::now();
|
||||
yield record_batch;
|
||||
metrics.yield_cost += yield_start.elapsed();
|
||||
|
||||
fetch_start = Instant::now();
|
||||
}
|
||||
|
||||
// Yields an empty part to indicate this range is terminated.
|
||||
// The query engine can use this to optimize some queries.
|
||||
if distinguish_range {
|
||||
let yield_start = Instant::now();
|
||||
yield stream_ctx.input.mapper.empty_record_batch();
|
||||
metrics.yield_cost += yield_start.elapsed();
|
||||
}
|
||||
|
||||
metrics.scan_cost += fetch_start.elapsed();
|
||||
part_metrics.merge_metrics(&metrics);
|
||||
|
||||
part_metrics.on_finish();
|
||||
};
|
||||
|
||||
let stream = Box::pin(RecordBatchStreamWrapper::new(
|
||||
self.stream_ctx.input.mapper.output_schema(),
|
||||
Box::pin(stream),
|
||||
));
|
||||
|
||||
Ok(stream)
|
||||
}
|
||||
|
||||
fn new_semaphore(&self) -> Option<Arc<Semaphore>> {
|
||||
if self.properties.target_partitions() > self.properties.num_partitions() {
|
||||
// We can use additional tasks to read the data if we have more target partitions than actual partitions.
|
||||
@@ -498,7 +411,7 @@ impl fmt::Debug for SeqScan {
|
||||
}
|
||||
|
||||
/// Builds sources for the partition range and push them to the `sources` vector.
|
||||
fn build_sources(
|
||||
pub(crate) fn build_sources(
|
||||
stream_ctx: &Arc<StreamContext>,
|
||||
part_range: &PartitionRange,
|
||||
compaction: bool,
|
||||
@@ -509,8 +422,8 @@ fn build_sources(
|
||||
// Gets range meta.
|
||||
let range_meta = &stream_ctx.ranges[part_range.identifier];
|
||||
#[cfg(debug_assertions)]
|
||||
if compaction || stream_ctx.input.distribution == Some(TimeSeriesDistribution::PerSeries) {
|
||||
// Compaction or per-series distribution expects that input sources have not been split.
|
||||
if compaction {
|
||||
// Compaction expects that input sources have not been split.
|
||||
debug_assert_eq!(range_meta.indices.len(), range_meta.row_group_indices.len());
|
||||
for (i, row_group_idx) in range_meta.row_group_indices.iter().enumerate() {
|
||||
// It should scan all row groups.
|
||||
|
||||
src/mito2/src/read/series_scan.rs (new file, 547 lines)
@@ -0,0 +1,547 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Per-series scan implementation.
|
||||
|
||||
use std::fmt;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use async_stream::try_stream;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::error::ExternalSnafu;
|
||||
use common_recordbatch::util::ChainedRecordBatchStream;
|
||||
use common_recordbatch::{RecordBatch, RecordBatchStreamWrapper, SendableRecordBatchStream};
|
||||
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
|
||||
use datafusion::physical_plan::{DisplayAs, DisplayFormatType};
|
||||
use datatypes::compute::concat_batches;
|
||||
use datatypes::schema::SchemaRef;
|
||||
use smallvec::{smallvec, SmallVec};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::metadata::RegionMetadataRef;
|
||||
use store_api::region_engine::{PartitionRange, PrepareRequest, RegionScanner, ScannerProperties};
|
||||
use tokio::sync::mpsc::error::{SendTimeoutError, TrySendError};
|
||||
use tokio::sync::mpsc::{self, Receiver, Sender};
|
||||
use tokio::sync::Semaphore;
|
||||
|
||||
use crate::error::{
|
||||
ComputeArrowSnafu, Error, InvalidSenderSnafu, PartitionOutOfRangeSnafu, Result,
|
||||
ScanMultiTimesSnafu, ScanSeriesSnafu,
|
||||
};
|
||||
use crate::read::range::RangeBuilderList;
|
||||
use crate::read::scan_region::{ScanInput, StreamContext};
|
||||
use crate::read::scan_util::{PartitionMetrics, PartitionMetricsList};
|
||||
use crate::read::seq_scan::{build_sources, SeqScan};
|
||||
use crate::read::{Batch, ScannerMetrics};
|
||||
|
||||
/// Timeout to send a batch to a sender.
|
||||
const SEND_TIMEOUT: Duration = Duration::from_millis(10);
|
||||
|
||||
/// List of receivers.
|
||||
type ReceiverList = Vec<Option<Receiver<Result<SeriesBatch>>>>;
|
||||
|
||||
/// Scans a region and returns sorted rows of a series in the same partition.
|
||||
///
|
||||
/// The output is always ordered by `(primary key, time index)` inside every
|
||||
/// partition.
|
||||
/// Always returns the same series (primary key) to the same partition.
|
||||
pub struct SeriesScan {
|
||||
/// Properties of the scanner.
|
||||
properties: ScannerProperties,
|
||||
/// Context of streams.
|
||||
stream_ctx: Arc<StreamContext>,
|
||||
/// Receivers of each partition.
|
||||
receivers: Mutex<ReceiverList>,
|
||||
/// Metrics for each partition.
|
||||
/// The scanner only populates this for queries and keeps it empty during compaction.
|
||||
metrics_list: Arc<PartitionMetricsList>,
|
||||
}
|
||||
|
||||
impl SeriesScan {
|
||||
/// Creates a new [SeriesScan].
|
||||
pub(crate) fn new(input: ScanInput) -> Self {
|
||||
let mut properties = ScannerProperties::default()
|
||||
.with_append_mode(input.append_mode)
|
||||
.with_total_rows(input.total_rows());
|
||||
let stream_ctx = Arc::new(StreamContext::seq_scan_ctx(input, false));
|
||||
properties.partitions = vec![stream_ctx.partition_ranges()];
|
||||
|
||||
Self {
|
||||
properties,
|
||||
stream_ctx,
|
||||
receivers: Mutex::new(Vec::new()),
|
||||
metrics_list: Arc::new(PartitionMetricsList::default()),
|
||||
}
|
||||
}
|
||||
|
||||
fn scan_partition_impl(
|
||||
&self,
|
||||
metrics_set: &ExecutionPlanMetricsSet,
|
||||
partition: usize,
|
||||
) -> Result<SendableRecordBatchStream, BoxedError> {
|
||||
if partition >= self.properties.num_partitions() {
|
||||
return Err(BoxedError::new(
|
||||
PartitionOutOfRangeSnafu {
|
||||
given: partition,
|
||||
all: self.properties.num_partitions(),
|
||||
}
|
||||
.build(),
|
||||
));
|
||||
}
|
||||
|
||||
self.maybe_start_distributor(metrics_set, &self.metrics_list);
|
||||
|
||||
let part_metrics =
|
||||
new_partition_metrics(&self.stream_ctx, metrics_set, partition, &self.metrics_list);
|
||||
let mut receiver = self.take_receiver(partition).map_err(BoxedError::new)?;
|
||||
let stream_ctx = self.stream_ctx.clone();
|
||||
|
||||
let stream = try_stream! {
|
||||
part_metrics.on_first_poll();
|
||||
|
||||
let cache = &stream_ctx.input.cache_strategy;
|
||||
let mut df_record_batches = Vec::new();
|
||||
let mut fetch_start = Instant::now();
|
||||
while let Some(result) = receiver.recv().await {
|
||||
let mut metrics = ScannerMetrics::default();
|
||||
let series = result.map_err(BoxedError::new).context(ExternalSnafu)?;
|
||||
metrics.scan_cost += fetch_start.elapsed();
|
||||
fetch_start = Instant::now();
|
||||
|
||||
let convert_start = Instant::now();
|
||||
df_record_batches.reserve(series.batches.len());
|
||||
for batch in series.batches {
|
||||
metrics.num_batches += 1;
|
||||
metrics.num_rows += batch.num_rows();
|
||||
|
||||
let record_batch = stream_ctx.input.mapper.convert(&batch, cache)?;
|
||||
df_record_batches.push(record_batch.into_df_record_batch());
|
||||
}
|
||||
|
||||
let output_schema = stream_ctx.input.mapper.output_schema();
|
||||
let df_record_batch =
|
||||
concat_batches(output_schema.arrow_schema(), &df_record_batches)
|
||||
.context(ComputeArrowSnafu)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
df_record_batches.clear();
|
||||
let record_batch =
|
||||
RecordBatch::try_from_df_record_batch(output_schema, df_record_batch)?;
|
||||
metrics.convert_cost += convert_start.elapsed();
|
||||
|
||||
let yield_start = Instant::now();
|
||||
yield record_batch;
|
||||
metrics.yield_cost += yield_start.elapsed();
|
||||
|
||||
part_metrics.merge_metrics(&metrics);
|
||||
}
|
||||
};
|
||||
|
||||
let stream = Box::pin(RecordBatchStreamWrapper::new(
|
||||
self.stream_ctx.input.mapper.output_schema(),
|
||||
Box::pin(stream),
|
||||
));
|
||||
|
||||
Ok(stream)
|
||||
}
|
||||
|
||||
/// Takes the receiver for the partition.
|
||||
fn take_receiver(&self, partition: usize) -> Result<Receiver<Result<SeriesBatch>>> {
|
||||
let mut rx_list = self.receivers.lock().unwrap();
|
||||
rx_list[partition]
|
||||
.take()
|
||||
.context(ScanMultiTimesSnafu { partition })
|
||||
}
|
||||
|
||||
/// Starts the distributor if the receiver list is empty.
|
||||
fn maybe_start_distributor(
|
||||
&self,
|
||||
metrics_set: &ExecutionPlanMetricsSet,
|
||||
metrics_list: &Arc<PartitionMetricsList>,
|
||||
) {
|
||||
let mut rx_list = self.receivers.lock().unwrap();
|
||||
if !rx_list.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
let (senders, receivers) = new_channel_list(self.properties.num_partitions());
|
||||
let mut distributor = SeriesDistributor {
|
||||
stream_ctx: self.stream_ctx.clone(),
|
||||
semaphore: Some(Arc::new(Semaphore::new(self.properties.num_partitions()))),
|
||||
partitions: self.properties.partitions.clone(),
|
||||
senders,
|
||||
metrics_set: metrics_set.clone(),
|
||||
metrics_list: metrics_list.clone(),
|
||||
};
|
||||
common_runtime::spawn_global(async move {
|
||||
distributor.execute().await;
|
||||
});
|
||||
|
||||
*rx_list = receivers;
|
||||
}
|
||||
|
||||
/// Scans the region and returns a stream.
|
||||
pub(crate) async fn build_stream(&self) -> Result<SendableRecordBatchStream, BoxedError> {
|
||||
let part_num = self.properties.num_partitions();
|
||||
let metrics_set = ExecutionPlanMetricsSet::default();
|
||||
let streams = (0..part_num)
|
||||
.map(|i| self.scan_partition(&metrics_set, i))
|
||||
.collect::<Result<Vec<_>, BoxedError>>()?;
|
||||
let chained_stream = ChainedRecordBatchStream::new(streams).map_err(BoxedError::new)?;
|
||||
Ok(Box::pin(chained_stream))
|
||||
}
|
||||
}
|
||||
|
||||
fn new_channel_list(num_partitions: usize) -> (SenderList, ReceiverList) {
|
||||
let (senders, receivers): (Vec<_>, Vec<_>) = (0..num_partitions)
|
||||
.map(|_| {
|
||||
let (sender, receiver) = mpsc::channel(1);
|
||||
(Some(sender), Some(receiver))
|
||||
})
|
||||
.unzip();
|
||||
(SenderList::new(senders), receivers)
|
||||
}
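Each partition gets a bounded channel of capacity 1, so a slow consumer makes the sender see "full" almost immediately and the distributor can move on to the next partition instead of blocking. A tiny self-contained illustration of that behavior (plain tokio mpsc, not the types above; requires the `tokio` crate with the `sync` feature):

use tokio::sync::mpsc;

fn main() {
    let (tx, mut rx) = mpsc::channel::<u64>(1);
    assert!(tx.try_send(1).is_ok());  // fills the single slot
    assert!(tx.try_send(2).is_err()); // full: the caller should try another partition
    assert_eq!(rx.try_recv().ok(), Some(1));
    assert!(tx.try_send(2).is_ok());  // slot is free again after the receiver drained it
}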
|
||||
|
||||
impl RegionScanner for SeriesScan {
|
||||
fn properties(&self) -> &ScannerProperties {
|
||||
&self.properties
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.stream_ctx.input.mapper.output_schema()
|
||||
}
|
||||
|
||||
fn metadata(&self) -> RegionMetadataRef {
|
||||
self.stream_ctx.input.mapper.metadata().clone()
|
||||
}
|
||||
|
||||
fn scan_partition(
|
||||
&self,
|
||||
metrics_set: &ExecutionPlanMetricsSet,
|
||||
partition: usize,
|
||||
) -> Result<SendableRecordBatchStream, BoxedError> {
|
||||
self.scan_partition_impl(metrics_set, partition)
|
||||
}
|
||||
|
||||
fn prepare(&mut self, request: PrepareRequest) -> Result<(), BoxedError> {
|
||||
self.properties.prepare(request);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn has_predicate(&self) -> bool {
|
||||
let predicate = self.stream_ctx.input.predicate();
|
||||
predicate.map(|p| !p.exprs().is_empty()).unwrap_or(false)
|
||||
}
|
||||
|
||||
fn set_logical_region(&mut self, logical_region: bool) {
|
||||
self.properties.set_logical_region(logical_region);
|
||||
}
|
||||
}
|
||||
|
||||
impl DisplayAs for SeriesScan {
|
||||
fn fmt_as(&self, t: DisplayFormatType, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"SeriesScan: region={}, ",
|
||||
self.stream_ctx.input.mapper.metadata().region_id
|
||||
)?;
|
||||
match t {
|
||||
DisplayFormatType::Default => self.stream_ctx.format_for_explain(false, f),
|
||||
DisplayFormatType::Verbose => {
|
||||
self.stream_ctx.format_for_explain(true, f)?;
|
||||
self.metrics_list.format_verbose_metrics(f)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for SeriesScan {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("SeriesScan")
|
||||
.field("num_ranges", &self.stream_ctx.ranges.len())
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
impl SeriesScan {
|
||||
/// Returns the input.
|
||||
pub(crate) fn input(&self) -> &ScanInput {
|
||||
&self.stream_ctx.input
|
||||
}
|
||||
}
|
||||
|
||||
/// The distributor scans series and distributes them to different partitions.
|
||||
struct SeriesDistributor {
|
||||
/// Context for the scan stream.
|
||||
stream_ctx: Arc<StreamContext>,
|
||||
/// Optional semaphore for limiting the number of concurrent scans.
|
||||
semaphore: Option<Arc<Semaphore>>,
|
||||
/// Partition ranges to scan.
|
||||
partitions: Vec<Vec<PartitionRange>>,
|
||||
/// Senders of all partitions.
|
||||
senders: SenderList,
|
||||
/// Metrics set to report.
|
||||
/// The distributor reports the metrics as an additional partition.
|
||||
/// This may double the scan cost of the [SeriesScan] metrics. We can
|
||||
/// get per-partition metrics in verbose mode to see the metrics of the
|
||||
/// distributor.
|
||||
metrics_set: ExecutionPlanMetricsSet,
|
||||
metrics_list: Arc<PartitionMetricsList>,
|
||||
}
|
||||
|
||||
impl SeriesDistributor {
|
||||
/// Executes the distributor.
|
||||
async fn execute(&mut self) {
|
||||
if let Err(e) = self.scan_partitions().await {
|
||||
self.senders.send_error(e).await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Scans all parts.
|
||||
async fn scan_partitions(&mut self) -> Result<()> {
|
||||
let part_metrics = new_partition_metrics(
|
||||
&self.stream_ctx,
|
||||
&self.metrics_set,
|
||||
self.partitions.len(),
|
||||
&self.metrics_list,
|
||||
);
|
||||
part_metrics.on_first_poll();
|
||||
|
||||
let range_builder_list = Arc::new(RangeBuilderList::new(
|
||||
self.stream_ctx.input.num_memtables(),
|
||||
self.stream_ctx.input.num_files(),
|
||||
));
|
||||
// Scans all parts.
|
||||
let mut sources = Vec::with_capacity(self.partitions.len());
|
||||
for partition in &self.partitions {
|
||||
sources.reserve(partition.len());
|
||||
for part_range in partition {
|
||||
build_sources(
|
||||
&self.stream_ctx,
|
||||
part_range,
|
||||
false,
|
||||
&part_metrics,
|
||||
range_builder_list.clone(),
|
||||
&mut sources,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Builds a reader that merges sources from all parts.
|
||||
let mut reader =
|
||||
SeqScan::build_reader_from_sources(&self.stream_ctx, sources, self.semaphore.clone())
|
||||
.await?;
|
||||
let mut metrics = ScannerMetrics::default();
|
||||
let mut fetch_start = Instant::now();
|
||||
|
||||
let mut current_series = SeriesBatch::default();
|
||||
while let Some(batch) = reader.next_batch().await? {
|
||||
metrics.scan_cost += fetch_start.elapsed();
|
||||
fetch_start = Instant::now();
|
||||
metrics.num_batches += 1;
|
||||
metrics.num_rows += batch.num_rows();
|
||||
|
||||
debug_assert!(!batch.is_empty());
|
||||
if batch.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let Some(last_key) = current_series.current_key() else {
|
||||
current_series.push(batch);
|
||||
continue;
|
||||
};
|
||||
|
||||
if last_key == batch.primary_key() {
|
||||
current_series.push(batch);
|
||||
continue;
|
||||
}
|
||||
|
||||
// We found a new series; send the current one.
|
||||
let to_send = std::mem::replace(&mut current_series, SeriesBatch::single(batch));
|
||||
let yield_start = Instant::now();
|
||||
self.senders.send_batch(to_send).await?;
|
||||
metrics.yield_cost += yield_start.elapsed();
|
||||
}
|
||||
|
||||
if !current_series.is_empty() {
|
||||
let yield_start = Instant::now();
|
||||
self.senders.send_batch(current_series).await?;
|
||||
metrics.yield_cost += yield_start.elapsed();
|
||||
}
|
||||
|
||||
metrics.scan_cost += fetch_start.elapsed();
|
||||
part_metrics.merge_metrics(&metrics);
|
||||
part_metrics.set_num_series_send_timeout(self.senders.num_timeout);
|
||||
|
||||
part_metrics.on_finish();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Batches of the same series.
|
||||
#[derive(Default)]
|
||||
struct SeriesBatch {
|
||||
batches: SmallVec<[Batch; 4]>,
|
||||
}
|
||||
|
||||
impl SeriesBatch {
|
||||
/// Creates a new [SeriesBatch] from a single [Batch].
|
||||
fn single(batch: Batch) -> Self {
|
||||
Self {
|
||||
batches: smallvec![batch],
|
||||
}
|
||||
}
|
||||
|
||||
fn current_key(&self) -> Option<&[u8]> {
|
||||
self.batches.first().map(|batch| batch.primary_key())
|
||||
}
|
||||
|
||||
fn push(&mut self, batch: Batch) {
|
||||
self.batches.push(batch);
|
||||
}
|
||||
|
||||
/// Returns true if there is no batch.
|
||||
fn is_empty(&self) -> bool {
|
||||
self.batches.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
/// List of senders.
|
||||
struct SenderList {
|
||||
senders: Vec<Option<Sender<Result<SeriesBatch>>>>,
|
||||
/// Number of None senders.
|
||||
num_nones: usize,
|
||||
/// Index of the current partition to send.
|
||||
sender_idx: usize,
|
||||
/// Number of send timeouts.
|
||||
num_timeout: usize,
|
||||
}
|
||||
|
||||
impl SenderList {
|
||||
fn new(senders: Vec<Option<Sender<Result<SeriesBatch>>>>) -> Self {
|
||||
let num_nones = senders.iter().filter(|sender| sender.is_none()).count();
|
||||
Self {
|
||||
senders,
|
||||
num_nones,
|
||||
sender_idx: 0,
|
||||
num_timeout: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Finds a partition and tries to send the batch to the partition.
|
||||
/// Returns None if it sends successfully.
|
||||
fn try_send_batch(&mut self, mut batch: SeriesBatch) -> Result<Option<SeriesBatch>> {
|
||||
for _ in 0..self.senders.len() {
|
||||
ensure!(self.num_nones < self.senders.len(), InvalidSenderSnafu);
|
||||
|
||||
let sender_idx = self.fetch_add_sender_idx();
|
||||
let Some(sender) = &self.senders[sender_idx] else {
|
||||
continue;
|
||||
};
|
||||
|
||||
match sender.try_send(Ok(batch)) {
|
||||
Ok(()) => return Ok(None),
|
||||
Err(TrySendError::Full(res)) => {
|
||||
// Safety: we send Ok.
|
||||
batch = res.unwrap();
|
||||
}
|
||||
Err(TrySendError::Closed(res)) => {
|
||||
self.senders[sender_idx] = None;
|
||||
self.num_nones += 1;
|
||||
// Safety: we send Ok.
|
||||
batch = res.unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Some(batch))
|
||||
}
|
||||
|
||||
/// Finds a partition and sends the batch to the partition.
|
||||
async fn send_batch(&mut self, mut batch: SeriesBatch) -> Result<()> {
|
||||
// Sends the batch without blocking first.
|
||||
match self.try_send_batch(batch)? {
|
||||
Some(b) => {
|
||||
// Unable to send batch to partition.
|
||||
batch = b;
|
||||
}
|
||||
None => {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
loop {
|
||||
ensure!(self.num_nones < self.senders.len(), InvalidSenderSnafu);
|
||||
|
||||
let sender_idx = self.fetch_add_sender_idx();
|
||||
let Some(sender) = &self.senders[sender_idx] else {
|
||||
continue;
|
||||
};
|
||||
// Adds a timeout so we don't block indefinitely and can keep sending
// batches in a round-robin fashion when some partitions don't poll their
// inputs. This may happen if there is a node like a sort merge above us,
// but it is rare when using SeriesScan.
|
||||
match sender.send_timeout(Ok(batch), SEND_TIMEOUT).await {
|
||||
Ok(()) => break,
|
||||
Err(SendTimeoutError::Timeout(res)) => {
|
||||
self.num_timeout += 1;
|
||||
// Safety: we send Ok.
|
||||
batch = res.unwrap();
|
||||
}
|
||||
Err(SendTimeoutError::Closed(res)) => {
|
||||
self.senders[sender_idx] = None;
|
||||
self.num_nones += 1;
|
||||
// Safety: we send Ok.
|
||||
batch = res.unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn send_error(&self, error: Error) {
|
||||
let error = Arc::new(error);
|
||||
for sender in self.senders.iter().flatten() {
|
||||
let result = Err(error.clone()).context(ScanSeriesSnafu);
|
||||
let _ = sender.send(result).await;
|
||||
}
|
||||
}
|
||||
|
||||
fn fetch_add_sender_idx(&mut self) -> usize {
|
||||
let sender_idx = self.sender_idx;
|
||||
self.sender_idx = (self.sender_idx + 1) % self.senders.len();
|
||||
sender_idx
|
||||
}
|
||||
}
|
||||
|
||||
fn new_partition_metrics(
|
||||
stream_ctx: &StreamContext,
|
||||
metrics_set: &ExecutionPlanMetricsSet,
|
||||
partition: usize,
|
||||
metrics_list: &PartitionMetricsList,
|
||||
) -> PartitionMetrics {
|
||||
let metrics = PartitionMetrics::new(
|
||||
stream_ctx.input.mapper.metadata().region_id,
|
||||
partition,
|
||||
"SeriesScan",
|
||||
stream_ctx.query_start,
|
||||
metrics_set,
|
||||
);
|
||||
|
||||
metrics_list.set(partition, metrics.clone());
|
||||
metrics
|
||||
}
|
||||
@@ -27,7 +27,7 @@ use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::TimeSeriesRowSelector;
|
||||
|
||||
use crate::error::{
|
||||
DecodeStatsSnafu, FieldTypeMismatchSnafu, FilterRecordBatchSnafu, Result, StatsNotPresentSnafu,
|
||||
DecodeStatsSnafu, FieldTypeMismatchSnafu, RecordBatchSnafu, Result, StatsNotPresentSnafu,
|
||||
};
|
||||
use crate::read::compat::CompatBatch;
|
||||
use crate::read::last_row::RowGroupLastRowCachedReader;
|
||||
@@ -294,7 +294,7 @@ impl RangeBase {
|
||||
};
|
||||
if filter
|
||||
.evaluate_scalar(&pk_value)
|
||||
.context(FilterRecordBatchSnafu)?
|
||||
.context(RecordBatchSnafu)?
|
||||
{
|
||||
continue;
|
||||
} else {
|
||||
@@ -311,11 +311,11 @@ impl RangeBase {
|
||||
let field_col = &input.fields()[field_index].data;
|
||||
filter
|
||||
.evaluate_vector(field_col)
|
||||
.context(FilterRecordBatchSnafu)?
|
||||
.context(RecordBatchSnafu)?
|
||||
}
|
||||
SemanticType::Timestamp => filter
|
||||
.evaluate_vector(input.timestamps())
|
||||
.context(FilterRecordBatchSnafu)?,
|
||||
.context(RecordBatchSnafu)?,
|
||||
};
|
||||
|
||||
mask = mask.bitand(&result);
|
||||
|
||||
@@ -134,7 +134,6 @@ impl WriteFormat {
|
||||
|
||||
/// Helper for reading the SST format.
|
||||
pub struct ReadFormat {
|
||||
/// The metadata stored in the SST.
|
||||
metadata: RegionMetadataRef,
|
||||
/// SST file schema.
|
||||
arrow_schema: SchemaRef,
|
||||
@@ -306,23 +305,17 @@ impl ReadFormat {
|
||||
&self,
|
||||
row_groups: &[impl Borrow<RowGroupMetaData>],
|
||||
column_id: ColumnId,
|
||||
) -> StatValues {
|
||||
let Some(column) = self.metadata.column_by_id(column_id) else {
|
||||
// No such column in the SST.
|
||||
return StatValues::NoColumn;
|
||||
};
|
||||
) -> Option<ArrayRef> {
|
||||
let column = self.metadata.column_by_id(column_id)?;
|
||||
match column.semantic_type {
|
||||
SemanticType::Tag => self.tag_values(row_groups, column, true),
|
||||
SemanticType::Field => {
|
||||
// Safety: `field_id_to_index` is initialized by the semantic type.
|
||||
let index = self.field_id_to_index.get(&column_id).unwrap();
|
||||
let stats = Self::column_values(row_groups, column, *index, true);
|
||||
StatValues::from_stats_opt(stats)
|
||||
let index = self.field_id_to_index.get(&column_id)?;
|
||||
Self::column_values(row_groups, column, *index, true)
|
||||
}
|
||||
SemanticType::Timestamp => {
|
||||
let index = self.time_index_position();
|
||||
let stats = Self::column_values(row_groups, column, index, true);
|
||||
StatValues::from_stats_opt(stats)
|
||||
Self::column_values(row_groups, column, index, true)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -332,23 +325,17 @@ impl ReadFormat {
|
||||
&self,
|
||||
row_groups: &[impl Borrow<RowGroupMetaData>],
|
||||
column_id: ColumnId,
|
||||
) -> StatValues {
|
||||
let Some(column) = self.metadata.column_by_id(column_id) else {
|
||||
// No such column in the SST.
|
||||
return StatValues::NoColumn;
|
||||
};
|
||||
) -> Option<ArrayRef> {
|
||||
let column = self.metadata.column_by_id(column_id)?;
|
||||
match column.semantic_type {
|
||||
SemanticType::Tag => self.tag_values(row_groups, column, false),
|
||||
SemanticType::Field => {
|
||||
// Safety: `field_id_to_index` is initialized by the semantic type.
|
||||
let index = self.field_id_to_index.get(&column_id).unwrap();
|
||||
let stats = Self::column_values(row_groups, column, *index, false);
|
||||
StatValues::from_stats_opt(stats)
|
||||
let index = self.field_id_to_index.get(&column_id)?;
|
||||
Self::column_values(row_groups, column, *index, false)
|
||||
}
|
||||
SemanticType::Timestamp => {
|
||||
let index = self.time_index_position();
|
||||
let stats = Self::column_values(row_groups, column, index, false);
|
||||
StatValues::from_stats_opt(stats)
|
||||
Self::column_values(row_groups, column, index, false)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -358,23 +345,17 @@ impl ReadFormat {
|
||||
&self,
|
||||
row_groups: &[impl Borrow<RowGroupMetaData>],
|
||||
column_id: ColumnId,
|
||||
) -> StatValues {
|
||||
let Some(column) = self.metadata.column_by_id(column_id) else {
|
||||
// No such column in the SST.
|
||||
return StatValues::NoColumn;
|
||||
};
|
||||
) -> Option<ArrayRef> {
|
||||
let column = self.metadata.column_by_id(column_id)?;
|
||||
match column.semantic_type {
|
||||
SemanticType::Tag => StatValues::NoStats,
|
||||
SemanticType::Tag => None,
|
||||
SemanticType::Field => {
|
||||
// Safety: `field_id_to_index` is initialized by the semantic type.
|
||||
let index = self.field_id_to_index.get(&column_id).unwrap();
|
||||
let stats = Self::column_null_counts(row_groups, *index);
|
||||
StatValues::from_stats_opt(stats)
|
||||
let index = self.field_id_to_index.get(&column_id)?;
|
||||
Self::column_null_counts(row_groups, *index)
|
||||
}
|
||||
SemanticType::Timestamp => {
|
||||
let index = self.time_index_position();
|
||||
let stats = Self::column_null_counts(row_groups, index);
|
||||
StatValues::from_stats_opt(stats)
|
||||
Self::column_null_counts(row_groups, index)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -409,7 +390,8 @@ impl ReadFormat {
|
||||
row_groups: &[impl Borrow<RowGroupMetaData>],
|
||||
column: &ColumnMetadata,
|
||||
is_min: bool,
|
||||
) -> StatValues {
|
||||
) -> Option<ArrayRef> {
|
||||
let primary_key_encoding = self.metadata.primary_key_encoding;
|
||||
let is_first_tag = self
|
||||
.metadata
|
||||
.primary_key
|
||||
@@ -418,28 +400,9 @@ impl ReadFormat {
|
||||
.unwrap_or(false);
|
||||
if !is_first_tag {
|
||||
// Only the min-max of the first tag is available in the primary key.
|
||||
return StatValues::NoStats;
|
||||
return None;
|
||||
}
|
||||
|
||||
StatValues::from_stats_opt(self.first_tag_values(row_groups, column, is_min))
|
||||
}
|
||||
|
||||
/// Returns min/max values of the first tag.
|
||||
/// Returns None if the tag does not have statistics.
|
||||
fn first_tag_values(
|
||||
&self,
|
||||
row_groups: &[impl Borrow<RowGroupMetaData>],
|
||||
column: &ColumnMetadata,
|
||||
is_min: bool,
|
||||
) -> Option<ArrayRef> {
|
||||
debug_assert!(self
|
||||
.metadata
|
||||
.primary_key
|
||||
.first()
|
||||
.map(|id| *id == column.column_id)
|
||||
.unwrap_or(false));
|
||||
|
||||
let primary_key_encoding = self.metadata.primary_key_encoding;
|
||||
let converter = build_primary_key_codec_with_fields(
|
||||
primary_key_encoding,
|
||||
[(
|
||||
@@ -489,7 +452,6 @@ impl ReadFormat {
|
||||
}
|
||||
|
||||
/// Returns min/max values of specific non-tag columns.
|
||||
/// Returns None if the column does not have statistics.
|
||||
fn column_values(
|
||||
row_groups: &[impl Borrow<RowGroupMetaData>],
|
||||
column: &ColumnMetadata,
|
||||
@@ -582,29 +544,6 @@ impl ReadFormat {
|
||||
}
|
||||
}
|
||||
|
||||
/// Values of column statistics of the SST.
|
||||
///
|
||||
/// It also distinguishes the case where a column is not found from the case where
/// the column exists but has no statistics.
|
||||
pub enum StatValues {
|
||||
/// Values of each row group.
|
||||
Values(ArrayRef),
|
||||
/// No such column.
|
||||
NoColumn,
|
||||
/// Column exists but has no statistics.
|
||||
NoStats,
|
||||
}
|
||||
|
||||
impl StatValues {
|
||||
/// Creates a new `StatValues` instance from optional statistics.
|
||||
pub fn from_stats_opt(stats: Option<ArrayRef>) -> Self {
|
||||
match stats {
|
||||
Some(stats) => StatValues::Values(stats),
|
||||
None => StatValues::NoStats,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
impl ReadFormat {
|
||||
/// Creates a helper with existing `metadata` and all columns.
|
||||
|
||||
@@ -25,7 +25,7 @@ use parquet::file::metadata::RowGroupMetaData;
|
||||
use store_api::metadata::RegionMetadataRef;
|
||||
use store_api::storage::ColumnId;
|
||||
|
||||
use crate::sst::parquet::format::{ReadFormat, StatValues};
|
||||
use crate::sst::parquet::format::ReadFormat;
|
||||
|
||||
/// Statistics for pruning row groups.
|
||||
pub(crate) struct RowGroupPruningStats<'a, T> {
|
||||
@@ -100,18 +100,16 @@ impl<T: Borrow<RowGroupMetaData>> PruningStatistics for RowGroupPruningStats<'_,
|
||||
fn min_values(&self, column: &Column) -> Option<ArrayRef> {
|
||||
let column_id = self.column_id_to_prune(&column.name)?;
|
||||
match self.read_format.min_values(self.row_groups, column_id) {
|
||||
StatValues::Values(values) => Some(values),
|
||||
StatValues::NoColumn => self.compat_default_value(&column.name),
|
||||
StatValues::NoStats => None,
|
||||
Some(values) => Some(values),
|
||||
None => self.compat_default_value(&column.name),
|
||||
}
|
||||
}
|
||||
|
||||
fn max_values(&self, column: &Column) -> Option<ArrayRef> {
|
||||
let column_id = self.column_id_to_prune(&column.name)?;
|
||||
match self.read_format.max_values(self.row_groups, column_id) {
|
||||
StatValues::Values(values) => Some(values),
|
||||
StatValues::NoColumn => self.compat_default_value(&column.name),
|
||||
StatValues::NoStats => None,
|
||||
Some(values) => Some(values),
|
||||
None => self.compat_default_value(&column.name),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -120,12 +118,10 @@ impl<T: Borrow<RowGroupMetaData>> PruningStatistics for RowGroupPruningStats<'_,
|
||||
}
|
||||
|
||||
fn null_counts(&self, column: &Column) -> Option<ArrayRef> {
|
||||
let column_id = self.column_id_to_prune(&column.name)?;
|
||||
match self.read_format.null_counts(self.row_groups, column_id) {
|
||||
StatValues::Values(values) => Some(values),
|
||||
StatValues::NoColumn => self.compat_null_count(&column.name),
|
||||
StatValues::NoStats => None,
|
||||
}
|
||||
let Some(column_id) = self.column_id_to_prune(&column.name) else {
|
||||
return self.compat_null_count(&column.name);
|
||||
};
|
||||
self.read_format.null_counts(self.row_groups, column_id)
|
||||
}
|
||||
|
||||
fn row_counts(&self, _column: &Column) -> Option<ArrayRef> {
|
||||
|
||||
@@ -91,9 +91,9 @@ impl UserDefinedLogicalNodeCore for InstantManipulate {
|
||||
_exprs: Vec<Expr>,
|
||||
inputs: Vec<LogicalPlan>,
|
||||
) -> DataFusionResult<Self> {
|
||||
if inputs.is_empty() {
|
||||
if inputs.len() != 1 {
|
||||
return Err(DataFusionError::Internal(
|
||||
"InstantManipulate should have at least one input".to_string(),
|
||||
"InstantManipulate should have exact one input".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
@@ -354,6 +354,9 @@ impl Stream for InstantManipulateStream {
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
let poll = match ready!(self.input.poll_next_unpin(cx)) {
|
||||
Some(Ok(batch)) => {
|
||||
if batch.num_rows() == 0 {
|
||||
return Poll::Pending;
|
||||
}
|
||||
let timer = std::time::Instant::now();
|
||||
self.num_series.add(1);
|
||||
let result = Ok(batch).and_then(|batch| self.manipulate(batch));
|
||||
|
||||
@@ -42,7 +42,7 @@ use greptime_proto::substrait_extension as pb;
|
||||
use prost::Message;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{DataFusionPlanningSnafu, DeserializeSnafu, Result};
|
||||
use crate::error::{DeserializeSnafu, Result};
|
||||
use crate::extension_plan::{Millisecond, METRIC_NUM_SERIES};
|
||||
use crate::metrics::PROMQL_SERIES_COUNT;
|
||||
use crate::range_array::RangeArray;
|
||||
@@ -194,20 +194,26 @@ impl RangeManipulate {
|
||||
|
||||
pub fn deserialize(bytes: &[u8]) -> Result<Self> {
|
||||
let pb_range_manipulate = pb::RangeManipulate::decode(bytes).context(DeserializeSnafu)?;
|
||||
let empty_schema = Arc::new(DFSchema::empty());
|
||||
let placeholder_plan = LogicalPlan::EmptyRelation(EmptyRelation {
|
||||
produce_one_row: false,
|
||||
schema: Arc::new(DFSchema::empty()),
|
||||
schema: empty_schema.clone(),
|
||||
});
|
||||
Self::new(
|
||||
pb_range_manipulate.start,
|
||||
pb_range_manipulate.end,
|
||||
pb_range_manipulate.interval,
|
||||
pb_range_manipulate.range,
|
||||
pb_range_manipulate.time_index,
|
||||
pb_range_manipulate.tag_columns,
|
||||
placeholder_plan,
|
||||
)
|
||||
.context(DataFusionPlanningSnafu)
|
||||
|
||||
// Unlike `Self::new()`, this method doesn't check the input schema, since that check
// would fail because the placeholder input schema is empty.
// This is OK because DataFusion guarantees to call `with_exprs_and_inputs` on the
// deserialized plan.
|
||||
Ok(Self {
|
||||
start: pb_range_manipulate.start,
|
||||
end: pb_range_manipulate.end,
|
||||
interval: pb_range_manipulate.interval,
|
||||
range: pb_range_manipulate.range,
|
||||
time_index: pb_range_manipulate.time_index,
|
||||
field_columns: pb_range_manipulate.tag_columns,
|
||||
input: placeholder_plan,
|
||||
output_schema: empty_schema,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -270,14 +276,19 @@ impl UserDefinedLogicalNodeCore for RangeManipulate {
|
||||
fn with_exprs_and_inputs(
|
||||
&self,
|
||||
_exprs: Vec<Expr>,
|
||||
inputs: Vec<LogicalPlan>,
|
||||
mut inputs: Vec<LogicalPlan>,
|
||||
) -> DataFusionResult<Self> {
|
||||
if inputs.is_empty() {
|
||||
if inputs.len() != 1 {
|
||||
return Err(DataFusionError::Internal(
|
||||
"RangeManipulate should have at least one input".to_string(),
|
||||
"RangeManipulate should have at exact one input".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let input: LogicalPlan = inputs.pop().unwrap();
|
||||
let input_schema = input.schema();
|
||||
let output_schema =
|
||||
Self::calculate_output_schema(input_schema, &self.time_index, &self.field_columns)?;
|
||||
|
||||
Ok(Self {
|
||||
start: self.start,
|
||||
end: self.end,
|
||||
@@ -285,8 +296,8 @@ impl UserDefinedLogicalNodeCore for RangeManipulate {
|
||||
range: self.range,
|
||||
time_index: self.time_index.clone(),
|
||||
field_columns: self.field_columns.clone(),
|
||||
input: inputs.into_iter().next().unwrap(),
|
||||
output_schema: self.output_schema.clone(),
|
||||
input,
|
||||
output_schema,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -106,6 +106,10 @@ impl SeriesDivide {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn tags(&self) -> &[String] {
|
||||
&self.tag_columns
|
||||
}
|
||||
|
||||
pub fn serialize(&self) -> Vec<u8> {
|
||||
pb::SeriesDivide {
|
||||
tag_columns: self.tag_columns.clone(),
|
||||
@@ -315,7 +319,9 @@ impl Stream for SeriesDivideStream {
|
||||
let next_batch = ready!(self.as_mut().fetch_next_batch(cx)).transpose()?;
|
||||
let timer = std::time::Instant::now();
|
||||
if let Some(next_batch) = next_batch {
|
||||
self.buffer.push(next_batch);
|
||||
if next_batch.num_rows() != 0 {
|
||||
self.buffer.push(next_batch);
|
||||
}
|
||||
continue;
|
||||
} else {
|
||||
// input stream has ended
|
||||
|
||||
@@ -40,7 +40,7 @@ pub use holt_winters::HoltWinters;
|
||||
pub use idelta::IDelta;
|
||||
pub use predict_linear::PredictLinear;
|
||||
pub use quantile::QuantileOverTime;
|
||||
pub use quantile_aggr::quantile_udaf;
|
||||
pub use quantile_aggr::{quantile_udaf, QUANTILE_NAME};
|
||||
pub use resets::Resets;
|
||||
pub use round::Round;
|
||||
|
||||
|
||||
@@ -228,7 +228,7 @@ impl<const IS_COUNTER: bool, const IS_RATE: bool> ExtrapolatedRate<IS_COUNTER, I
|
||||
|
||||
// delta
|
||||
impl ExtrapolatedRate<false, false> {
|
||||
pub fn name() -> &'static str {
|
||||
pub const fn name() -> &'static str {
|
||||
"prom_delta"
|
||||
}
|
||||
|
||||
@@ -239,7 +239,7 @@ impl ExtrapolatedRate<false, false> {
|
||||
|
||||
// rate
|
||||
impl ExtrapolatedRate<true, true> {
|
||||
pub fn name() -> &'static str {
|
||||
pub const fn name() -> &'static str {
|
||||
"prom_rate"
|
||||
}
|
||||
|
||||
@@ -250,7 +250,7 @@ impl ExtrapolatedRate<true, true> {
|
||||
|
||||
// increase
|
||||
impl ExtrapolatedRate<true, false> {
|
||||
pub fn name() -> &'static str {
|
||||
pub const fn name() -> &'static str {
|
||||
"prom_increase"
|
||||
}
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ use datatypes::arrow::datatypes::{DataType, Field, Float64Type};
|
||||
|
||||
use crate::functions::quantile::quantile_impl;
|
||||
|
||||
const QUANTILE_NAME: &str = "quantile";
|
||||
pub const QUANTILE_NAME: &str = "quantile";
|
||||
|
||||
const VALUES_FIELD_NAME: &str = "values";
|
||||
const DEFAULT_LIST_FIELD_NAME: &str = "item";
|
||||
|
||||
@@ -20,7 +20,7 @@ use arrow_schema::DataType;
|
||||
use catalog::table_source::DfTableSourceProvider;
|
||||
use common_function::aggr::{
|
||||
GeoPathAccumulator, HllState, UddSketchState, GEO_PATH_NAME, HLL_MERGE_NAME, HLL_NAME,
|
||||
UDDSKETCH_MERGE_NAME, UDDSKETCH_STATE_NAME,
|
||||
UDDSKETCH_STATE_NAME,
|
||||
};
|
||||
use common_function::scalars::udf::create_udf;
|
||||
use common_query::logical_plan::create_aggregate_function;
|
||||
@@ -165,9 +165,7 @@ impl ContextProvider for DfContextProviderAdapter {
|
||||
|
||||
fn get_aggregate_meta(&self, name: &str) -> Option<Arc<AggregateUDF>> {
|
||||
if name == UDDSKETCH_STATE_NAME {
|
||||
return Some(Arc::new(UddSketchState::state_udf_impl()));
|
||||
} else if name == UDDSKETCH_MERGE_NAME {
|
||||
return Some(Arc::new(UddSketchState::merge_udf_impl()));
|
||||
return Some(Arc::new(UddSketchState::udf_impl()));
|
||||
} else if name == HLL_NAME {
|
||||
return Some(Arc::new(HllState::state_udf_impl()));
|
||||
} else if name == HLL_MERGE_NAME {
|
||||
|
||||
@@ -55,12 +55,16 @@ impl Categorizer {
|
||||
LogicalPlan::Filter(filter) => Self::check_expr(&filter.predicate),
|
||||
LogicalPlan::Window(_) => Commutativity::Unimplemented,
|
||||
LogicalPlan::Aggregate(aggr) => {
|
||||
if Self::check_partition(&aggr.group_expr, &partition_cols) {
|
||||
return Commutativity::Commutative;
|
||||
if !Self::check_partition(&aggr.group_expr, &partition_cols) {
|
||||
return Commutativity::NonCommutative;
|
||||
}
|
||||
|
||||
// Check all child exprs and use the strictest level.
|
||||
Commutativity::Unimplemented
|
||||
for expr in &aggr.aggr_expr {
|
||||
let commutativity = Self::check_expr(expr);
|
||||
if !matches!(commutativity, Commutativity::Commutative) {
|
||||
return commutativity;
|
||||
}
|
||||
}
|
||||
Commutativity::Commutative
|
||||
}
|
||||
LogicalPlan::Sort(_) => {
|
||||
if partition_cols.is_empty() {
|
||||
@@ -94,7 +98,7 @@ impl Categorizer {
|
||||
}
|
||||
}
|
||||
LogicalPlan::Extension(extension) => {
|
||||
Self::check_extension_plan(extension.node.as_ref() as _)
|
||||
Self::check_extension_plan(extension.node.as_ref() as _, &partition_cols)
|
||||
}
|
||||
LogicalPlan::Distinct(_) => {
|
||||
if partition_cols.is_empty() {
|
||||
@@ -116,13 +120,30 @@ impl Categorizer {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check_extension_plan(plan: &dyn UserDefinedLogicalNode) -> Commutativity {
|
||||
pub fn check_extension_plan(
|
||||
plan: &dyn UserDefinedLogicalNode,
|
||||
partition_cols: &[String],
|
||||
) -> Commutativity {
|
||||
match plan.name() {
|
||||
name if name == EmptyMetric::name()
|
||||
name if name == SeriesDivide::name() => {
|
||||
let series_divide = plan.as_any().downcast_ref::<SeriesDivide>().unwrap();
|
||||
let tags = series_divide.tags().iter().collect::<HashSet<_>>();
|
||||
for partition_col in partition_cols {
|
||||
if !tags.contains(partition_col) {
|
||||
return Commutativity::NonCommutative;
|
||||
}
|
||||
}
|
||||
Commutativity::Commutative
|
||||
}
|
||||
name if name == SeriesNormalize::name()
|
||||
|| name == InstantManipulate::name()
|
||||
|| name == SeriesNormalize::name()
|
||||
|| name == RangeManipulate::name()
|
||||
|| name == SeriesDivide::name()
|
||||
|| name == RangeManipulate::name() =>
|
||||
{
|
||||
// They should always follows Series Divide.
|
||||
// Either all commutative or all non-commutative (which will be blocked by SeriesDivide).
|
||||
Commutativity::Commutative
|
||||
}
|
||||
name if name == EmptyMetric::name()
|
||||
|| name == MergeScanLogicalPlan::name()
|
||||
|| name == MergeSortLogicalPlan::name() =>
|
||||
{
|
||||
@@ -148,8 +169,9 @@ impl Categorizer {
|
||||
| Expr::Negative(_)
|
||||
| Expr::Between(_)
|
||||
| Expr::Exists(_)
|
||||
| Expr::InList(_)
|
||||
| Expr::ScalarFunction(_) => Commutativity::Commutative,
|
||||
| Expr::InList(_) => Commutativity::Commutative,
|
||||
Expr::ScalarFunction(_udf) => Commutativity::Commutative,
|
||||
Expr::AggregateFunction(_udaf) => Commutativity::Commutative,
|
||||
|
||||
Expr::Like(_)
|
||||
| Expr::SimilarTo(_)
|
||||
@@ -158,7 +180,6 @@ impl Categorizer {
|
||||
| Expr::Case(_)
|
||||
| Expr::Cast(_)
|
||||
| Expr::TryCast(_)
|
||||
| Expr::AggregateFunction(_)
|
||||
| Expr::WindowFunction(_)
|
||||
| Expr::InSubquery(_)
|
||||
| Expr::ScalarSubquery(_)
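The `Categorizer` hunks above tighten when a plan may be pushed past the distributed merge: an aggregate is only commutative if its `GROUP BY` covers the partition columns and every aggregate expression is itself commutative, and `SeriesDivide` is only commutative if its tag set covers the partition columns. A simplified, self-contained sketch of that rule (local types only; `check_partition` is reduced to a set-cover test):

```rust
use std::collections::HashSet;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Commutativity {
    Commutative,
    NonCommutative,
}

// Simplified stand-ins for the real logical plan nodes.
struct Aggregate {
    group_cols: Vec<String>,
    // Pre-computed commutativity of each aggregate expression.
    aggr_expr_levels: Vec<Commutativity>,
}

struct SeriesDivide {
    tags: Vec<String>,
}

/// An aggregate may run below the merge only if the GROUP BY covers the
/// partition columns and every aggregate expression is commutative; otherwise
/// the strictest level wins.
fn check_aggregate(aggr: &Aggregate, partition_cols: &[String]) -> Commutativity {
    let groups: HashSet<&String> = aggr.group_cols.iter().collect();
    if !partition_cols.iter().all(|col| groups.contains(col)) {
        return Commutativity::NonCommutative;
    }
    for level in &aggr.aggr_expr_levels {
        if *level != Commutativity::Commutative {
            return *level;
        }
    }
    Commutativity::Commutative
}

/// SeriesDivide may run below the merge only if its tags cover the partition
/// columns, so a single series never spans two partitions.
fn check_series_divide(divide: &SeriesDivide, partition_cols: &[String]) -> Commutativity {
    let tags: HashSet<&String> = divide.tags.iter().collect();
    if partition_cols.iter().all(|col| tags.contains(col)) {
        Commutativity::Commutative
    } else {
        Commutativity::NonCommutative
    }
}

fn main() {
    let aggr = Aggregate {
        group_cols: vec!["host".to_string()],
        aggr_expr_levels: vec![Commutativity::Commutative],
    };
    assert_eq!(check_aggregate(&aggr, &["host".to_string()]), Commutativity::Commutative);

    let divide = SeriesDivide { tags: vec!["host".to_string()] };
    assert_eq!(
        check_series_divide(&divide, &["region".to_string()]),
        Commutativity::NonCommutative
    );
}
```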
|
||||
|
||||
@@ -62,21 +62,28 @@ impl ParallelizeScan {
|
||||
} else if let Some(region_scan_exec) =
|
||||
plan.as_any().downcast_ref::<RegionScanExec>()
|
||||
{
|
||||
let expected_partition_num = config.execution.target_partitions;
|
||||
if region_scan_exec.is_partition_set() {
|
||||
return Ok(Transformed::no(plan));
|
||||
}
|
||||
|
||||
// don't parallelize if we want per series distribution
|
||||
if matches!(
|
||||
region_scan_exec.distribution(),
|
||||
Some(TimeSeriesDistribution::PerSeries)
|
||||
) {
|
||||
return Ok(Transformed::no(plan));
|
||||
let partition_range = region_scan_exec.get_partition_ranges();
|
||||
// HACK: Allocate expected_partition_num empty partitions to indicate
|
||||
// the expected partition number.
|
||||
let mut new_partitions = vec![vec![]; expected_partition_num];
|
||||
new_partitions[0] = partition_range;
|
||||
let new_plan = region_scan_exec
|
||||
.with_new_partitions(new_partitions, expected_partition_num)
|
||||
.map_err(|e| DataFusionError::External(e.into_inner()))?;
|
||||
return Ok(Transformed::yes(Arc::new(new_plan)));
|
||||
}
|
||||
|
||||
let ranges = region_scan_exec.get_partition_ranges();
|
||||
let total_range_num = ranges.len();
|
||||
let expected_partition_num = config.execution.target_partitions;
|
||||
|
||||
// assign ranges to each partition
|
||||
let mut partition_ranges =
|
||||
@@ -131,26 +138,18 @@ impl ParallelizeScan {
|
||||
) -> Vec<Vec<PartitionRange>> {
|
||||
if ranges.is_empty() {
|
||||
// Returns a single partition with no range.
|
||||
return vec![vec![]];
|
||||
return vec![vec![]; expected_partition_num];
|
||||
}
|
||||
|
||||
if ranges.len() == 1 {
|
||||
return vec![ranges];
|
||||
let mut vec = vec![vec![]; expected_partition_num];
|
||||
vec[0] = ranges;
|
||||
return vec;
|
||||
}
|
||||
|
||||
// Sort ranges by number of rows in descending order.
|
||||
ranges.sort_by(|a, b| b.num_rows.cmp(&a.num_rows));
|
||||
// Get the max row number of the ranges. Note that the number of rows may be 0 if statistics are not available.
|
||||
let max_rows = ranges[0].num_rows;
|
||||
let total_rows = ranges.iter().map(|range| range.num_rows).sum::<usize>();
|
||||
// Computes the partition num by the max row number. This eliminates the unbalance of the partitions.
|
||||
let balanced_partition_num = if max_rows > 0 {
|
||||
total_rows.div_ceil(max_rows)
|
||||
} else {
|
||||
ranges.len()
|
||||
};
|
||||
let actual_partition_num = expected_partition_num.min(balanced_partition_num).max(1);
|
||||
let mut partition_ranges = vec![vec![]; actual_partition_num];
|
||||
let mut partition_ranges = vec![vec![]; expected_partition_num];
|
||||
|
||||
#[derive(Eq, PartialEq)]
|
||||
struct HeapNode {
|
||||
@@ -172,7 +171,7 @@ impl ParallelizeScan {
|
||||
}
|
||||
|
||||
let mut part_heap =
|
||||
BinaryHeap::from_iter((0..actual_partition_num).map(|partition_idx| HeapNode {
|
||||
BinaryHeap::from_iter((0..expected_partition_num).map(|partition_idx| HeapNode {
|
||||
num_rows: 0,
|
||||
partition_idx,
|
||||
}));
|
||||
@@ -263,7 +262,7 @@ mod test {
|
||||
];
|
||||
assert_eq!(result, expected);
|
||||
|
||||
// assign 4 ranges to 5 partitions. Only 4 partitions are returned.
|
||||
// assign 4 ranges to 5 partitions.
|
||||
let expected_partition_num = 5;
|
||||
let result = ParallelizeScan::assign_partition_range(ranges, expected_partition_num);
|
||||
let expected = vec![
|
||||
@@ -273,32 +272,31 @@ mod test {
|
||||
num_rows: 250,
|
||||
identifier: 4,
|
||||
}],
|
||||
vec![PartitionRange {
|
||||
start: Timestamp::new(0, TimeUnit::Second),
|
||||
end: Timestamp::new(10, TimeUnit::Second),
|
||||
num_rows: 100,
|
||||
identifier: 1,
|
||||
}],
|
||||
vec![PartitionRange {
|
||||
start: Timestamp::new(10, TimeUnit::Second),
|
||||
end: Timestamp::new(20, TimeUnit::Second),
|
||||
num_rows: 200,
|
||||
identifier: 2,
|
||||
}],
|
||||
vec![
|
||||
PartitionRange {
|
||||
start: Timestamp::new(20, TimeUnit::Second),
|
||||
end: Timestamp::new(30, TimeUnit::Second),
|
||||
num_rows: 150,
|
||||
identifier: 3,
|
||||
},
|
||||
PartitionRange {
|
||||
start: Timestamp::new(0, TimeUnit::Second),
|
||||
end: Timestamp::new(10, TimeUnit::Second),
|
||||
num_rows: 100,
|
||||
identifier: 1,
|
||||
},
|
||||
],
|
||||
vec![],
|
||||
vec![PartitionRange {
|
||||
start: Timestamp::new(20, TimeUnit::Second),
|
||||
end: Timestamp::new(30, TimeUnit::Second),
|
||||
num_rows: 150,
|
||||
identifier: 3,
|
||||
}],
|
||||
];
|
||||
assert_eq!(result, expected);
|
||||
|
||||
// assign 0 ranges to 5 partitions. Only 1 partition is returned.
|
||||
// assign 0 ranges to 5 partitions. Should return 5 empty ranges.
|
||||
let result = ParallelizeScan::assign_partition_range(vec![], 5);
|
||||
assert_eq!(result.len(), 1);
|
||||
assert_eq!(result.len(), 5);
|
||||
}
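The `assign_partition_range` hunks above switch to always returning `expected_partition_num` buckets (possibly empty) and pack ranges with a binary heap. A simplified sketch of that strategy, using plain row counts instead of `PartitionRange` (a hypothetical helper, not the real implementation):

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

// Hypothetical, simplified version of the new strategy: keep exactly
// `expected_partition_num` buckets (some may stay empty) and greedily hand the
// next-largest range to the bucket with the fewest rows so far.
fn assign_ranges(mut range_rows: Vec<usize>, expected_partition_num: usize) -> Vec<Vec<usize>> {
    let mut partitions: Vec<Vec<usize>> = vec![vec![]; expected_partition_num];
    if range_rows.is_empty() {
        return partitions;
    }
    // Largest ranges first so the greedy choice keeps row counts balanced.
    range_rows.sort_unstable_by(|a, b| b.cmp(a));

    // Min-heap of (rows assigned so far, partition index).
    let mut heap: BinaryHeap<Reverse<(usize, usize)>> =
        (0..expected_partition_num).map(|idx| Reverse((0, idx))).collect();

    for rows in range_rows {
        let Reverse((assigned, idx)) = heap.pop().expect("at least one partition");
        partitions[idx].push(rows);
        heap.push(Reverse((assigned + rows, idx)));
    }
    partitions
}

fn main() {
    // Four ranges over five partitions: five buckets come back, at least one empty.
    let parts = assign_ranges(vec![250, 100, 200, 150], 5);
    assert_eq!(parts.len(), 5);
    assert!(parts.iter().any(|p| p.is_empty()));
}
```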
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -29,6 +29,11 @@ use datafusion::execution::{FunctionRegistry, SessionStateBuilder};
|
||||
use datafusion::logical_expr::LogicalPlan;
|
||||
use datafusion_expr::UserDefinedLogicalNode;
|
||||
use greptime_proto::substrait_extension::MergeScan as PbMergeScan;
|
||||
use promql::functions::{
|
||||
AbsentOverTime, AvgOverTime, Changes, CountOverTime, Delta, Deriv, IDelta, Increase,
|
||||
LastOverTime, MaxOverTime, MinOverTime, PresentOverTime, Rate, Resets, StddevOverTime,
|
||||
StdvarOverTime, SumOverTime,
|
||||
};
|
||||
use prost::Message;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::ResultExt;
|
||||
@@ -128,11 +133,30 @@ impl SubstraitPlanDecoder for DefaultPlanDecoder {
|
||||
session_state
|
||||
.register_udf(udf)
|
||||
.context(RegisterUdfSnafu { name: func.name() })?;
|
||||
let _ = session_state.register_udaf(Arc::new(UddSketchState::state_udf_impl()));
|
||||
let _ = session_state.register_udaf(Arc::new(UddSketchState::merge_udf_impl()));
|
||||
let _ = session_state.register_udaf(Arc::new(UddSketchState::udf_impl()));
|
||||
let _ = session_state.register_udaf(Arc::new(HllState::state_udf_impl()));
|
||||
let _ = session_state.register_udaf(Arc::new(HllState::merge_udf_impl()));
|
||||
let _ = session_state.register_udaf(Arc::new(GeoPathAccumulator::udf_impl()));
|
||||
|
||||
let _ = session_state.register_udf(Arc::new(IDelta::<false>::scalar_udf()));
|
||||
let _ = session_state.register_udf(Arc::new(IDelta::<true>::scalar_udf()));
|
||||
let _ = session_state.register_udf(Arc::new(Rate::scalar_udf()));
|
||||
let _ = session_state.register_udf(Arc::new(Increase::scalar_udf()));
|
||||
let _ = session_state.register_udf(Arc::new(Delta::scalar_udf()));
|
||||
let _ = session_state.register_udf(Arc::new(Resets::scalar_udf()));
|
||||
let _ = session_state.register_udf(Arc::new(Changes::scalar_udf()));
|
||||
let _ = session_state.register_udf(Arc::new(Deriv::scalar_udf()));
|
||||
let _ = session_state.register_udf(Arc::new(AvgOverTime::scalar_udf()));
|
||||
let _ = session_state.register_udf(Arc::new(MinOverTime::scalar_udf()));
|
||||
let _ = session_state.register_udf(Arc::new(MaxOverTime::scalar_udf()));
|
||||
let _ = session_state.register_udf(Arc::new(SumOverTime::scalar_udf()));
|
||||
let _ = session_state.register_udf(Arc::new(CountOverTime::scalar_udf()));
|
||||
let _ = session_state.register_udf(Arc::new(LastOverTime::scalar_udf()));
|
||||
let _ = session_state.register_udf(Arc::new(AbsentOverTime::scalar_udf()));
|
||||
let _ = session_state.register_udf(Arc::new(PresentOverTime::scalar_udf()));
|
||||
let _ = session_state.register_udf(Arc::new(StddevOverTime::scalar_udf()));
|
||||
let _ = session_state.register_udf(Arc::new(StdvarOverTime::scalar_udf()));
|
||||
// TODO(ruihang): add quantile_over_time, predict_linear, holt_winters, round
|
||||
}
|
||||
let logical_plan = DFLogicalSubstraitConvertor
|
||||
.decode(message, session_state)
|
||||
|
||||
@@ -31,6 +31,7 @@ use datafusion::error::Result as DfResult;
|
||||
use datafusion::execution::context::{QueryPlanner, SessionConfig, SessionContext, SessionState};
|
||||
use datafusion::execution::runtime_env::RuntimeEnv;
|
||||
use datafusion::execution::SessionStateBuilder;
|
||||
use datafusion::physical_optimizer::enforce_sorting::EnforceSorting;
|
||||
use datafusion::physical_optimizer::optimizer::PhysicalOptimizer;
|
||||
use datafusion::physical_optimizer::sanity_checker::SanityCheckPlan;
|
||||
use datafusion::physical_optimizer::PhysicalOptimizerRule;
|
||||
@@ -142,6 +143,9 @@ impl QueryEngineState {
|
||||
physical_optimizer
|
||||
.rules
|
||||
.insert(1, Arc::new(PassDistribution));
|
||||
physical_optimizer
|
||||
.rules
|
||||
.insert(2, Arc::new(EnforceSorting {}));
|
||||
// Add rule for windowed sort
|
||||
physical_optimizer
|
||||
.rules
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
#![feature(assert_matches)]
|
||||
#![feature(try_blocks)]
|
||||
#![feature(let_chains)]
|
||||
|
||||
pub mod dist_table;
|
||||
pub mod error;
|
||||
|
||||
@@ -82,11 +82,17 @@ impl RegionScanExec {
|
||||
if scanner.properties().is_logical_region() {
|
||||
pk_names.sort_unstable();
|
||||
}
|
||||
let mut pk_columns: Vec<PhysicalSortExpr> = pk_names
|
||||
.into_iter()
|
||||
let pk_columns = pk_names
|
||||
.iter()
|
||||
.filter_map(
|
||||
|col| Some(Arc::new(Column::new_with_schema(col, &arrow_schema).ok()?) as _),
|
||||
)
|
||||
.collect::<Vec<_>>();
|
||||
let mut pk_sort_columns: Vec<PhysicalSortExpr> = pk_names
|
||||
.iter()
|
||||
.filter_map(|col| {
|
||||
Some(PhysicalSortExpr::new(
|
||||
Arc::new(Column::new_with_schema(&col, &arrow_schema).ok()?) as _,
|
||||
Arc::new(Column::new_with_schema(col, &arrow_schema).ok()?) as _,
|
||||
SortOptions {
|
||||
descending: false,
|
||||
nulls_first: true,
|
||||
@@ -113,28 +119,37 @@ impl RegionScanExec {
|
||||
let eq_props = match request.distribution {
|
||||
Some(TimeSeriesDistribution::PerSeries) => {
|
||||
if let Some(ts) = ts_col {
|
||||
pk_columns.push(ts);
|
||||
pk_sort_columns.push(ts);
|
||||
}
|
||||
EquivalenceProperties::new_with_orderings(
|
||||
arrow_schema.clone(),
|
||||
&[LexOrdering::new(pk_columns)],
|
||||
&[LexOrdering::new(pk_sort_columns)],
|
||||
)
|
||||
}
|
||||
Some(TimeSeriesDistribution::TimeWindowed) => {
|
||||
if let Some(ts_col) = ts_col {
|
||||
pk_columns.insert(0, ts_col);
|
||||
pk_sort_columns.insert(0, ts_col);
|
||||
}
|
||||
EquivalenceProperties::new_with_orderings(
|
||||
arrow_schema.clone(),
|
||||
&[LexOrdering::new(pk_columns)],
|
||||
&[LexOrdering::new(pk_sort_columns)],
|
||||
)
|
||||
}
|
||||
None => EquivalenceProperties::new(arrow_schema.clone()),
|
||||
};
|
||||
|
||||
let partitioning = match request.distribution {
|
||||
Some(TimeSeriesDistribution::PerSeries) => {
|
||||
Partitioning::Hash(pk_columns.clone(), num_output_partition)
|
||||
}
|
||||
Some(TimeSeriesDistribution::TimeWindowed) | None => {
|
||||
Partitioning::UnknownPartitioning(num_output_partition)
|
||||
}
|
||||
};
|
||||
|
||||
let properties = PlanProperties::new(
|
||||
eq_props,
|
||||
Partitioning::UnknownPartitioning(num_output_partition),
|
||||
partitioning,
|
||||
EmissionType::Incremental,
|
||||
Boundedness::Bounded,
|
||||
);
|
||||
@@ -188,9 +203,14 @@ impl RegionScanExec {
|
||||
warn!("Setting partition ranges more than once for RegionScanExec");
|
||||
}
|
||||
|
||||
let num_partitions = partitions.len();
|
||||
let mut properties = self.properties.clone();
|
||||
properties.partitioning = Partitioning::UnknownPartitioning(num_partitions);
|
||||
let new_partitioning = match properties.partitioning {
|
||||
Partitioning::Hash(ref columns, _) => {
|
||||
Partitioning::Hash(columns.clone(), target_partitions)
|
||||
}
|
||||
_ => Partitioning::UnknownPartitioning(target_partitions),
|
||||
};
|
||||
properties.partitioning = new_partitioning;
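The `RegionScanExec` hunks above start advertising `Partitioning::Hash` over the primary-key columns when a per-series distribution is requested, and preserve the hash columns when only the partition count changes. A simplified sketch with local stand-in types (not datafusion's actual `Partitioning`):

```rust
// Local stand-ins, kept deliberately simple.
#[derive(Clone, Debug, PartialEq)]
enum Partitioning {
    Hash(Vec<String>, usize),
    Unknown(usize),
}

enum Distribution {
    PerSeries,
    TimeWindowed,
}

// Per-series output is hash-partitioned by the primary-key columns so one
// series never straddles two partitions; otherwise only the count is known.
fn output_partitioning(
    distribution: Option<&Distribution>,
    pk_columns: &[String],
    partitions: usize,
) -> Partitioning {
    match distribution {
        Some(Distribution::PerSeries) => Partitioning::Hash(pk_columns.to_vec(), partitions),
        Some(Distribution::TimeWindowed) | None => Partitioning::Unknown(partitions),
    }
}

// When the partition count is adjusted later, keep the hash columns and swap
// only the count, as the cloned plan properties do in the hunk above.
fn with_new_partition_count(current: &Partitioning, target: usize) -> Partitioning {
    match current {
        Partitioning::Hash(cols, _) => Partitioning::Hash(cols.clone(), target),
        Partitioning::Unknown(_) => Partitioning::Unknown(target),
    }
}

fn main() {
    let pk = vec!["host".to_string()];
    let p = output_partitioning(Some(&Distribution::PerSeries), &pk, 8);
    assert_eq!(with_new_partition_count(&p, 32), Partitioning::Hash(pk, 32));
}
```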
|
||||
|
||||
{
|
||||
let mut scanner = self.scanner.lock().unwrap();
|
||||
|
||||
@@ -52,41 +52,7 @@ select uddsketch_calc(0.95, uddsketch_state(128, 0.01, `value`)) from test_uddsk
|
||||
| 100.49456770856492 |
|
||||
+----------------------------------------------------------------------------------------------+
|
||||
|
||||
CREATE TABLE grouped_uddsketch (
|
||||
`state` BINARY,
|
||||
id_group INT PRIMARY KEY,
|
||||
`ts` timestamp time index default now()
|
||||
);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
INSERT INTO grouped_uddsketch (`state`, id_group) SELECT uddsketch_state(128, 0.01, `value`), `id`/5*5 as id_group FROM test_uddsketch GROUP BY id_group;
|
||||
|
||||
Affected Rows: 3
|
||||
|
||||
SELECT uddsketch_calc(0.1, uddsketch_merge(128, 0.01, `state`)) FROM grouped_uddsketch;
|
||||
|
||||
+------------------------------------------------------------------------------------------------+
|
||||
| uddsketch_calc(Float64(0.1),uddsketch_merge(Int64(128),Float64(0.01),grouped_uddsketch.state)) |
|
||||
+------------------------------------------------------------------------------------------------+
|
||||
| 19.886670240866184 |
|
||||
+------------------------------------------------------------------------------------------------+
|
||||
|
||||
-- should fail
|
||||
SELECT uddsketch_calc(0.1, uddsketch_merge(128, 0.1, `state`)) FROM grouped_uddsketch;
|
||||
|
||||
Error: 3001(EngineExecuteQuery), Error during planning: Merging UDDSketch with different parameters: arguments=(128, 0.1) vs actual input=(128, 0.01)
|
||||
|
||||
-- should fail
|
||||
SELECT uddsketch_calc(0.1, uddsketch_merge(64, 0.01, `state`)) FROM grouped_uddsketch;
|
||||
|
||||
Error: 3001(EngineExecuteQuery), Error during planning: Merging UDDSketch with different parameters: arguments=(64, 0.01) vs actual input=(128, 0.01)
|
||||
|
||||
drop table test_uddsketch;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
drop table grouped_uddsketch;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
|
||||
@@ -24,21 +24,4 @@ select uddsketch_calc(0.75, uddsketch_state(128, 0.01, `value`)) from test_uddsk
|
||||
|
||||
select uddsketch_calc(0.95, uddsketch_state(128, 0.01, `value`)) from test_uddsketch;
|
||||
|
||||
CREATE TABLE grouped_uddsketch (
|
||||
`state` BINARY,
|
||||
id_group INT PRIMARY KEY,
|
||||
`ts` timestamp time index default now()
|
||||
);
|
||||
|
||||
INSERT INTO grouped_uddsketch (`state`, id_group) SELECT uddsketch_state(128, 0.01, `value`), `id`/5*5 as id_group FROM test_uddsketch GROUP BY id_group;
|
||||
|
||||
SELECT uddsketch_calc(0.1, uddsketch_merge(128, 0.01, `state`)) FROM grouped_uddsketch;
|
||||
|
||||
-- should fail
|
||||
SELECT uddsketch_calc(0.1, uddsketch_merge(128, 0.1, `state`)) FROM grouped_uddsketch;
|
||||
|
||||
-- should fail
|
||||
SELECT uddsketch_calc(0.1, uddsketch_merge(64, 0.01, `state`)) FROM grouped_uddsketch;
|
||||
|
||||
drop table test_uddsketch;
|
||||
drop table grouped_uddsketch;
|
||||
|
||||
@@ -31,15 +31,6 @@ FROM
|
||||
Affected Rows: 0
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
+----------+
|
||||
| Int64(1) |
|
||||
+----------+
|
||||
| 1 |
|
||||
+----------+
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
INSERT INTO
|
||||
distinct_basic
|
||||
VALUES
|
||||
|
||||
@@ -23,9 +23,6 @@ FROM
|
||||
distinct_basic;
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
INSERT INTO
|
||||
distinct_basic
|
||||
VALUES
|
||||
|
||||
@@ -44,15 +44,6 @@ ADMIN FLUSH_FLOW('test_numbers_basic');
|
||||
+----------------------------------------+
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
+----------+
|
||||
| Int64(1) |
|
||||
+----------+
|
||||
| 1 |
|
||||
+----------+
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
SHOW CREATE TABLE out_num_cnt_basic;
|
||||
|
||||
+-------------------+--------------------------------------------------+
|
||||
@@ -110,16 +101,6 @@ GROUP BY
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
+----------+
|
||||
| Int64(1) |
|
||||
+----------+
|
||||
| 1 |
|
||||
+----------+
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
SHOW CREATE TABLE out_num_cnt_basic;
|
||||
|
||||
+-------------------+--------------------------------------------------+
|
||||
@@ -137,15 +118,6 @@ SHOW CREATE TABLE out_num_cnt_basic;
|
||||
+-------------------+--------------------------------------------------+
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
+----------+
|
||||
| Int64(1) |
|
||||
+----------+
|
||||
| 1 |
|
||||
+----------+
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
SHOW CREATE FLOW test_numbers_basic;
|
||||
|
||||
+--------------------+---------------------------------------------------------------------------------------+
|
||||
|
||||
@@ -20,9 +20,6 @@ SHOW CREATE TABLE out_num_cnt_basic;
|
||||
ADMIN FLUSH_FLOW('test_numbers_basic');
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
SHOW CREATE TABLE out_num_cnt_basic;
|
||||
|
||||
SHOW CREATE FLOW test_numbers_basic;
|
||||
@@ -47,16 +44,10 @@ FROM
|
||||
numbers_input_basic
|
||||
GROUP BY
|
||||
ts;
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
SHOW CREATE TABLE out_num_cnt_basic;
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
SHOW CREATE FLOW test_numbers_basic;
|
||||
|
||||
SHOW CREATE TABLE out_num_cnt_basic;
|
||||
|
||||
@@ -62,15 +62,6 @@ SHOW CREATE TABLE out_num_cnt_basic;
|
||||
+-------------------+--------------------------------------------------+
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
+----------+
|
||||
| Int64(1) |
|
||||
+----------+
|
||||
| 1 |
|
||||
+----------+
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
INSERT INTO
|
||||
numbers_input_basic
|
||||
VALUES
|
||||
@@ -215,15 +206,6 @@ SHOW CREATE TABLE out_basic;
|
||||
+-----------+---------------------------------------------+
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
+----------+
|
||||
| Int64(1) |
|
||||
+----------+
|
||||
| 1 |
|
||||
+----------+
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
INSERT INTO
|
||||
input_basic
|
||||
VALUES
|
||||
@@ -324,15 +306,6 @@ ADMIN FLUSH_FLOW('test_distinct_basic');
|
||||
+-----------------------------------------+
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
+----------+
|
||||
| Int64(1) |
|
||||
+----------+
|
||||
| 1 |
|
||||
+----------+
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
INSERT INTO
|
||||
distinct_basic
|
||||
VALUES
|
||||
@@ -1692,15 +1665,6 @@ ADMIN FLUSH_FLOW('test_numbers_basic');
|
||||
+----------------------------------------+
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
+----------+
|
||||
| Int64(1) |
|
||||
+----------+
|
||||
| 1 |
|
||||
+----------+
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
INSERT INTO
|
||||
numbers_input_basic
|
||||
VALUES
|
||||
|
||||
@@ -24,9 +24,6 @@ ADMIN FLUSH_FLOW('test_numbers_basic');
|
||||
SHOW CREATE TABLE out_num_cnt_basic;
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
INSERT INTO
|
||||
numbers_input_basic
|
||||
VALUES
|
||||
@@ -94,9 +91,6 @@ FROM
|
||||
SHOW CREATE TABLE out_basic;
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
INSERT INTO
|
||||
input_basic
|
||||
VALUES
|
||||
@@ -136,9 +130,6 @@ SHOW CREATE TABLE out_distinct_basic;
|
||||
ADMIN FLUSH_FLOW('test_distinct_basic');
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
INSERT INTO
|
||||
distinct_basic
|
||||
VALUES
|
||||
@@ -797,9 +788,6 @@ SHOW CREATE TABLE out_num_cnt_basic;
|
||||
ADMIN FLUSH_FLOW('test_numbers_basic');
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
INSERT INTO
|
||||
numbers_input_basic
|
||||
VALUES
|
||||
|
||||
@@ -730,21 +730,10 @@ SELECT key FROM api_stats;
|
||||
+-----+
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
+----------+
|
||||
| Int64(1) |
|
||||
+----------+
|
||||
| 1 |
|
||||
+----------+
|
||||
|
||||
-- SQLNESS SLEEP 5s
|
||||
INSERT INTO `api_log` (`time`, `key`, `status_code`, `method`, `path`, `raw_query`, `user_agent`, `client_ip`, `duration`, `count`) VALUES (now(), '2', 0, 'GET', '/lightning/v1/query', 'key=1&since=600', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36', '1', 21, 1);
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
-- wait more time so flownode have time to recover flows
|
||||
-- SQLNESS SLEEP 5s
|
||||
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
|
||||
ADMIN FLUSH_FLOW('api_stats_flow');
|
||||
|
||||
|
||||
@@ -399,13 +399,8 @@ ADMIN FLUSH_FLOW('api_stats_flow');
|
||||
SELECT key FROM api_stats;
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
-- SQLNESS SLEEP 5s
|
||||
INSERT INTO `api_log` (`time`, `key`, `status_code`, `method`, `path`, `raw_query`, `user_agent`, `client_ip`, `duration`, `count`) VALUES (now(), '2', 0, 'GET', '/lightning/v1/query', 'key=1&since=600', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36', '1', 21, 1);
|
||||
|
||||
-- wait more time so flownode have time to recover flows
|
||||
-- SQLNESS SLEEP 5s
|
||||
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
|
||||
ADMIN FLUSH_FLOW('api_stats_flow');
|
||||
|
||||
|
||||
@@ -50,7 +50,6 @@ ADMIN FLUSH_FLOW('calc_access_log_10s');
|
||||
+-----------------------------------------+
|
||||
|
||||
-- query should return 3 rows
|
||||
-- SQLNESS SORT_RESULT 3 1
|
||||
SELECT "url", time_window FROM access_log_10s
|
||||
ORDER BY
|
||||
time_window;
|
||||
@@ -64,7 +63,6 @@ ORDER BY
|
||||
+------------+---------------------+
|
||||
|
||||
-- use hll_count to query the approximate data in access_log_10s
|
||||
-- SQLNESS SORT_RESULT 3 1
|
||||
SELECT "url", time_window, hll_count(state) FROM access_log_10s
|
||||
ORDER BY
|
||||
time_window;
|
||||
@@ -78,7 +76,6 @@ ORDER BY
|
||||
+------------+---------------------+---------------------------------+
|
||||
|
||||
-- further, we can aggregate 10 seconds of data to every minute, by using hll_merge to merge 10 seconds of hyperloglog state
|
||||
-- SQLNESS SORT_RESULT 3 1
|
||||
SELECT
|
||||
"url",
|
||||
date_bin('1 minute'::INTERVAL, time_window) AS time_window_1m,
|
||||
@@ -94,8 +91,8 @@ ORDER BY
|
||||
+------------+---------------------+------------+
|
||||
| url | time_window_1m | uv_per_min |
|
||||
+------------+---------------------+------------+
|
||||
| /dashboard | 2025-03-04T00:00:00 | 3 |
|
||||
| /not_found | 2025-03-04T00:00:00 | 1 |
|
||||
| /dashboard | 2025-03-04T00:00:00 | 3 |
|
||||
+------------+---------------------+------------+
|
||||
|
||||
DROP FLOW calc_access_log_10s;
|
||||
@@ -204,13 +201,6 @@ CREATE TABLE percentile_5s (
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
CREATE TABLE percentile_10s (
|
||||
"percentile_state" BINARY,
|
||||
time_window timestamp(0) time index
|
||||
);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
CREATE FLOW calc_percentile_5s SINK TO percentile_5s
|
||||
AS
|
||||
SELECT
|
||||
@@ -223,18 +213,6 @@ GROUP BY
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
CREATE FLOW calc_percentile_10s SINK TO percentile_10s
|
||||
AS
|
||||
SELECT
|
||||
uddsketch_merge(128, 0.01, percentile_state),
|
||||
date_bin('10 seconds'::INTERVAL, time_window) AS time_window
|
||||
FROM
|
||||
percentile_5s
|
||||
GROUP BY
|
||||
date_bin('10 seconds'::INTERVAL, time_window);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
INSERT INTO percentile_base ("id", "value", ts) VALUES
|
||||
(1, 10.0, 1),
|
||||
(2, 20.0, 2),
|
||||
@@ -258,15 +236,6 @@ ADMIN FLUSH_FLOW('calc_percentile_5s');
|
||||
| FLOW_FLUSHED |
|
||||
+----------------------------------------+
|
||||
|
||||
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
|
||||
ADMIN FLUSH_FLOW('calc_percentile_10s');
|
||||
|
||||
+-----------------------------------------+
|
||||
| ADMIN FLUSH_FLOW('calc_percentile_10s') |
|
||||
+-----------------------------------------+
|
||||
| FLOW_FLUSHED |
|
||||
+-----------------------------------------+
|
||||
|
||||
SELECT
|
||||
time_window,
|
||||
uddsketch_calc(0.99, percentile_state) AS p99
|
||||
@@ -283,37 +252,14 @@ ORDER BY
|
||||
| 1970-01-01T00:00:10 | |
|
||||
+---------------------+--------------------+
|
||||
|
||||
SELECT
|
||||
time_window,
|
||||
uddsketch_calc(0.99, percentile_state) AS p99
|
||||
FROM
|
||||
percentile_10s
|
||||
ORDER BY
|
||||
time_window;
|
||||
|
||||
+---------------------+--------------------+
|
||||
| time_window | p99 |
|
||||
+---------------------+--------------------+
|
||||
| 1970-01-01T00:00:00 | 59.745049810145126 |
|
||||
| 1970-01-01T00:00:10 | |
|
||||
+---------------------+--------------------+
|
||||
|
||||
DROP FLOW calc_percentile_5s;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
DROP FLOW calc_percentile_10s;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
DROP TABLE percentile_5s;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
DROP TABLE percentile_10s;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
DROP TABLE percentile_base;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
@@ -36,19 +36,16 @@ INSERT INTO access_log VALUES
|
||||
ADMIN FLUSH_FLOW('calc_access_log_10s');
|
||||
|
||||
-- query should return 3 rows
|
||||
-- SQLNESS SORT_RESULT 3 1
|
||||
SELECT "url", time_window FROM access_log_10s
|
||||
ORDER BY
|
||||
time_window;
|
||||
|
||||
-- use hll_count to query the approximate data in access_log_10s
|
||||
-- SQLNESS SORT_RESULT 3 1
|
||||
SELECT "url", time_window, hll_count(state) FROM access_log_10s
|
||||
ORDER BY
|
||||
time_window;
|
||||
|
||||
-- further, we can aggregate 10 seconds of data to every minute, by using hll_merge to merge 10 seconds of hyperloglog state
|
||||
-- SQLNESS SORT_RESULT 3 1
|
||||
SELECT
|
||||
"url",
|
||||
date_bin('1 minute'::INTERVAL, time_window) AS time_window_1m,
|
||||
@@ -126,11 +123,6 @@ CREATE TABLE percentile_5s (
|
||||
time_window timestamp(0) time index
|
||||
);
|
||||
|
||||
CREATE TABLE percentile_10s (
|
||||
"percentile_state" BINARY,
|
||||
time_window timestamp(0) time index
|
||||
);
|
||||
|
||||
CREATE FLOW calc_percentile_5s SINK TO percentile_5s
|
||||
AS
|
||||
SELECT
|
||||
@@ -141,16 +133,6 @@ FROM
|
||||
GROUP BY
|
||||
time_window;
|
||||
|
||||
CREATE FLOW calc_percentile_10s SINK TO percentile_10s
|
||||
AS
|
||||
SELECT
|
||||
uddsketch_merge(128, 0.01, percentile_state),
|
||||
date_bin('10 seconds'::INTERVAL, time_window) AS time_window
|
||||
FROM
|
||||
percentile_5s
|
||||
GROUP BY
|
||||
date_bin('10 seconds'::INTERVAL, time_window);
|
||||
|
||||
INSERT INTO percentile_base ("id", "value", ts) VALUES
|
||||
(1, 10.0, 1),
|
||||
(2, 20.0, 2),
|
||||
@@ -166,9 +148,6 @@ INSERT INTO percentile_base ("id", "value", ts) VALUES
|
||||
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
|
||||
ADMIN FLUSH_FLOW('calc_percentile_5s');
|
||||
|
||||
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
|
||||
ADMIN FLUSH_FLOW('calc_percentile_10s');
|
||||
|
||||
SELECT
|
||||
time_window,
|
||||
uddsketch_calc(0.99, percentile_state) AS p99
|
||||
@@ -177,16 +156,6 @@ FROM
|
||||
ORDER BY
|
||||
time_window;
|
||||
|
||||
SELECT
|
||||
time_window,
|
||||
uddsketch_calc(0.99, percentile_state) AS p99
|
||||
FROM
|
||||
percentile_10s
|
||||
ORDER BY
|
||||
time_window;
|
||||
|
||||
DROP FLOW calc_percentile_5s;
|
||||
DROP FLOW calc_percentile_10s;
|
||||
DROP TABLE percentile_5s;
|
||||
DROP TABLE percentile_10s;
|
||||
DROP TABLE percentile_base;
|
||||
|
||||
@@ -263,15 +263,6 @@ SELECT flow_name, table_catalog, flow_definition, source_table_names FROM INFORM
|
||||
|
||||
-- makesure after recover should be the same
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
+----------+
|
||||
| Int64(1) |
|
||||
+----------+
|
||||
| 1 |
|
||||
+----------+
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
SELECT flow_name, table_catalog, flow_definition, source_table_names FROM INFORMATION_SCHEMA.FLOWS WHERE flow_name='filter_numbers_show';
|
||||
|
||||
+---------------------+---------------+-------------------------------------------------------------+------------------------------------+
|
||||
|
||||
@@ -108,9 +108,6 @@ SELECT flow_name, table_catalog, flow_definition, source_table_names FROM INFORM
|
||||
|
||||
-- makesure after recover should be the same
|
||||
-- SQLNESS ARG restart=true
|
||||
SELECT 1;
|
||||
|
||||
-- SQLNESS SLEEP 3s
|
||||
|
||||
SELECT flow_name, table_catalog, flow_definition, source_table_names FROM INFORMATION_SCHEMA.FLOWS WHERE flow_name='filter_numbers_show';
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ select GREATEST('1999-01-30', '2023-03-01');
|
||||
+-------------------------------------------------+
|
||||
| greatest(Utf8("1999-01-30"),Utf8("2023-03-01")) |
|
||||
+-------------------------------------------------+
|
||||
| 2023-03-01 |
|
||||
| 2023-03-01T00:00:00 |
|
||||
+-------------------------------------------------+
|
||||
|
||||
select GREATEST('2000-02-11'::Date, '2020-12-30'::Date);
|
||||
|
||||
@@ -130,8 +130,7 @@ tql eval (3000, 3000, '1s') label_replace(histogram_quantile(0.8, histogram_buck
|
||||
-- quantile with rate is covered in other cases
|
||||
tql eval (3000, 3000, '1s') histogram_quantile(0.2, rate(histogram_bucket[5m]));
|
||||
|
||||
++
|
||||
++
|
||||
Error: 3001(EngineExecuteQuery), Unsupported arrow data type, type: Dictionary(Int64, Float64)
|
||||
|
||||
drop table histogram_bucket;
|
||||
|
||||
|
||||
@@ -1,158 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS `test_multi_pk_filter` ( `namespace` STRING NULL, `env` STRING NULL DEFAULT 'NULL', `flag` INT NULL, `total` BIGINT NULL, `greptime_timestamp` TIMESTAMP(9) NOT NULL, TIME INDEX (`greptime_timestamp`), PRIMARY KEY (`namespace`, `env`, `flag`) ) ENGINE=mito;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'production', 1, 5289, '2023-05-15 10:00:00');
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'production', 0, 421, '2023-05-15 10:05:00');
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'dev', 1, 356, '2023-05-15 10:10:00');
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
ADMIN FLUSH_TABLE('test_multi_pk_filter');
|
||||
|
||||
+-------------------------------------------+
|
||||
| ADMIN FLUSH_TABLE('test_multi_pk_filter') |
|
||||
+-------------------------------------------+
|
||||
| 0 |
|
||||
+-------------------------------------------+
|
||||
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'dev', 1, 412, '2023-05-15 10:15:00');
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'dev', 1, 298, '2023-05-15 10:20:00');
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'production', 1, 5289, '2023-05-15 10:25:00');
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'production', 1, 5874, '2023-05-15 10:30:00');
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
ADMIN FLUSH_TABLE('test_multi_pk_filter');
|
||||
|
||||
+-------------------------------------------+
|
||||
| ADMIN FLUSH_TABLE('test_multi_pk_filter') |
|
||||
+-------------------------------------------+
|
||||
| 0 |
|
||||
+-------------------------------------------+
|
||||
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'production', 1, 6132, '2023-05-15 10:35:00');
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'testing', 1, 1287, '2023-05-15 10:40:00');
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'testing', 1, 1432, '2023-05-15 10:45:00');
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'testing', 1, 1056, '2023-05-15 10:50:00');
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
SELECT greptime_timestamp, namespace, env, total FROM test_multi_pk_filter WHERE
|
||||
greptime_timestamp BETWEEN '2023-05-15 10:00:00' AND '2023-05-15 11:00:00' AND flag = 1 AND namespace = 'thermostat_v2'
|
||||
ORDER BY greptime_timestamp;
|
||||
|
||||
+---------------------+---------------+------------+-------+
|
||||
| greptime_timestamp | namespace | env | total |
|
||||
+---------------------+---------------+------------+-------+
|
||||
| 2023-05-15T10:00:00 | thermostat_v2 | production | 5289 |
|
||||
| 2023-05-15T10:10:00 | thermostat_v2 | dev | 356 |
|
||||
| 2023-05-15T10:15:00 | thermostat_v2 | dev | 412 |
|
||||
| 2023-05-15T10:20:00 | thermostat_v2 | dev | 298 |
|
||||
| 2023-05-15T10:25:00 | thermostat_v2 | production | 5289 |
|
||||
| 2023-05-15T10:30:00 | thermostat_v2 | production | 5874 |
|
||||
| 2023-05-15T10:35:00 | thermostat_v2 | production | 6132 |
|
||||
| 2023-05-15T10:40:00 | thermostat_v2 | testing | 1287 |
|
||||
| 2023-05-15T10:45:00 | thermostat_v2 | testing | 1432 |
|
||||
| 2023-05-15T10:50:00 | thermostat_v2 | testing | 1056 |
|
||||
+---------------------+---------------+------------+-------+
|
||||
|
||||
SELECT greptime_timestamp, namespace, env, total FROM test_multi_pk_filter WHERE
|
||||
greptime_timestamp BETWEEN '2023-05-15 10:00:00' AND '2023-05-15 11:00:00' AND flag = 1 AND namespace = 'thermostat_v2' AND env='dev'
|
||||
ORDER BY greptime_timestamp;
|
||||
|
||||
+---------------------+---------------+-----+-------+
|
||||
| greptime_timestamp | namespace | env | total |
|
||||
+---------------------+---------------+-----+-------+
|
||||
| 2023-05-15T10:10:00 | thermostat_v2 | dev | 356 |
|
||||
| 2023-05-15T10:15:00 | thermostat_v2 | dev | 412 |
|
||||
| 2023-05-15T10:20:00 | thermostat_v2 | dev | 298 |
|
||||
+---------------------+---------------+-----+-------+
|
||||
|
||||
DROP TABLE test_multi_pk_filter;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `test_multi_pk_null` ( `namespace` STRING NULL, `env` STRING NULL DEFAULT 'NULL', `total` BIGINT NULL, `greptime_timestamp` TIMESTAMP(9) NOT NULL, TIME INDEX (`greptime_timestamp`), PRIMARY KEY (`namespace`, `env`) ) ENGINE=mito;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
INSERT INTO test_multi_pk_null
|
||||
(namespace, env, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'production', 5289, '2023-05-15 10:00:00');
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
INSERT INTO test_multi_pk_null
|
||||
(namespace, env, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'production', 421, '2023-05-15 10:05:00');
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
ADMIN FLUSH_TABLE('test_multi_pk_null');
|
||||
|
||||
+-----------------------------------------+
|
||||
| ADMIN FLUSH_TABLE('test_multi_pk_null') |
|
||||
+-----------------------------------------+
|
||||
| 0 |
|
||||
+-----------------------------------------+
|
||||
|
||||
SELECT * FROM test_multi_pk_null WHERE env IS NOT NULL;
|
||||
|
||||
+---------------+------------+-------+---------------------+
|
||||
| namespace | env | total | greptime_timestamp |
|
||||
+---------------+------------+-------+---------------------+
|
||||
| thermostat_v2 | production | 5289 | 2023-05-15T10:00:00 |
|
||||
| thermostat_v2 | production | 421 | 2023-05-15T10:05:00 |
|
||||
+---------------+------------+-------+---------------------+
|
||||
|
||||
DROP TABLE test_multi_pk_null;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
@@ -1,66 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS `test_multi_pk_filter` ( `namespace` STRING NULL, `env` STRING NULL DEFAULT 'NULL', `flag` INT NULL, `total` BIGINT NULL, `greptime_timestamp` TIMESTAMP(9) NOT NULL, TIME INDEX (`greptime_timestamp`), PRIMARY KEY (`namespace`, `env`, `flag`) ) ENGINE=mito;
|
||||
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'production', 1, 5289, '2023-05-15 10:00:00');
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'production', 0, 421, '2023-05-15 10:05:00');
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'dev', 1, 356, '2023-05-15 10:10:00');
|
||||
|
||||
ADMIN FLUSH_TABLE('test_multi_pk_filter');
|
||||
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'dev', 1, 412, '2023-05-15 10:15:00');
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'dev', 1, 298, '2023-05-15 10:20:00');
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'production', 1, 5289, '2023-05-15 10:25:00');
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'production', 1, 5874, '2023-05-15 10:30:00');
|
||||
|
||||
ADMIN FLUSH_TABLE('test_multi_pk_filter');
|
||||
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'production', 1, 6132, '2023-05-15 10:35:00');
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'testing', 1, 1287, '2023-05-15 10:40:00');
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'testing', 1, 1432, '2023-05-15 10:45:00');
|
||||
INSERT INTO test_multi_pk_filter
|
||||
(namespace, env, flag, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'testing', 1, 1056, '2023-05-15 10:50:00');
|
||||
|
||||
SELECT greptime_timestamp, namespace, env, total FROM test_multi_pk_filter WHERE
|
||||
greptime_timestamp BETWEEN '2023-05-15 10:00:00' AND '2023-05-15 11:00:00' AND flag = 1 AND namespace = 'thermostat_v2'
|
||||
ORDER BY greptime_timestamp;
|
||||
|
||||
SELECT greptime_timestamp, namespace, env, total FROM test_multi_pk_filter WHERE
|
||||
greptime_timestamp BETWEEN '2023-05-15 10:00:00' AND '2023-05-15 11:00:00' AND flag = 1 AND namespace = 'thermostat_v2' AND env='dev'
|
||||
ORDER BY greptime_timestamp;
|
||||
|
||||
DROP TABLE test_multi_pk_filter;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `test_multi_pk_null` ( `namespace` STRING NULL, `env` STRING NULL DEFAULT 'NULL', `total` BIGINT NULL, `greptime_timestamp` TIMESTAMP(9) NOT NULL, TIME INDEX (`greptime_timestamp`), PRIMARY KEY (`namespace`, `env`) ) ENGINE=mito;
|
||||
|
||||
INSERT INTO test_multi_pk_null
|
||||
(namespace, env, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'production', 5289, '2023-05-15 10:00:00');
|
||||
INSERT INTO test_multi_pk_null
|
||||
(namespace, env, total, greptime_timestamp)
|
||||
VALUES ('thermostat_v2', 'production', 421, '2023-05-15 10:05:00');
|
||||
|
||||
ADMIN FLUSH_TABLE('test_multi_pk_null');
|
||||
|
||||
SELECT * FROM test_multi_pk_null WHERE env IS NOT NULL;
|
||||
|
||||
DROP TABLE test_multi_pk_null;
|
||||
@@ -17,11 +17,14 @@ tql analyze (1, 3, '1s') t1{ a = "a" };
|
||||
+-+-+-+
|
||||
| stage | node | plan_|
|
||||
+-+-+-+
|
||||
| 0_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|
||||
|_|_|_MergeScanExec: REDACTED
|
||||
| 0_| 0_|_MergeScanExec: REDACTED
|
||||
|_|_|_|
|
||||
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
| 1_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|
||||
|_|_|_SortExec: expr=[a@0 ASC], preserve_partitioning=[true] REDACTED
|
||||
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|
||||
|_|_|_RepartitionExec: partitioning=Hash([a@0], 32), input_partitions=1 REDACTED
|
||||
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
|_|_|_|
|
||||
|_|_| Total rows: 3_|
|
||||
+-+-+-+
|
||||
@@ -37,11 +40,14 @@ tql analyze (1, 3, '1s') t1{ a =~ ".*" };
|
||||
+-+-+-+
|
||||
| stage | node | plan_|
|
||||
+-+-+-+
|
||||
| 0_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|
||||
|_|_|_MergeScanExec: REDACTED
|
||||
| 0_| 0_|_MergeScanExec: REDACTED
|
||||
|_|_|_|
|
||||
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
| 1_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|
||||
|_|_|_SortExec: expr=[a@0 ASC], preserve_partitioning=[true] REDACTED
|
||||
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|
||||
|_|_|_RepartitionExec: partitioning=Hash([a@0], 32), input_partitions=1 REDACTED
|
||||
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
|_|_|_|
|
||||
|_|_| Total rows: 6_|
|
||||
+-+-+-+
|
||||
@@ -57,11 +63,14 @@ tql analyze (1, 3, '1s') t1{ a =~ "a.*" };
|
||||
+-+-+-+
|
||||
| stage | node | plan_|
|
||||
+-+-+-+
|
||||
| 0_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|
||||
|_|_|_MergeScanExec: REDACTED
|
||||
| 0_| 0_|_MergeScanExec: REDACTED
|
||||
|_|_|_|
|
||||
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
| 1_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|
||||
|_|_|_SortExec: expr=[a@0 ASC], preserve_partitioning=[true] REDACTED
|
||||
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|
||||
|_|_|_RepartitionExec: partitioning=Hash([a@0], 32), input_partitions=1 REDACTED
|
||||
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
|_|_|_|
|
||||
|_|_| Total rows: 3_|
|
||||
+-+-+-+
|
||||
|
||||
@@ -19,11 +19,14 @@ TQL ANALYZE (0, 10, '5s') test;
|
||||
+-+-+-+
|
||||
| stage | node | plan_|
|
||||
+-+-+-+
|
||||
| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|
||||
|_|_|_MergeScanExec: REDACTED
|
||||
| 0_| 0_|_MergeScanExec: REDACTED
|
||||
|_|_|_|
|
||||
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|
||||
|_|_|_SortExec: expr=[k@2 ASC], preserve_partitioning=[true] REDACTED
|
||||
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|
||||
|_|_|_RepartitionExec: partitioning=Hash([k@2], 32), input_partitions=1 REDACTED
|
||||
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
|_|_|_|
|
||||
|_|_| Total rows: 4_|
|
||||
+-+-+-+
|
||||
@@ -41,11 +44,14 @@ TQL ANALYZE (0, 10, '1s', '2s') test;
|
||||
+-+-+-+
|
||||
| stage | node | plan_|
|
||||
+-+-+-+
|
||||
| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[2000], interval=[1000], time index=[j] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|
||||
|_|_|_MergeScanExec: REDACTED
|
||||
| 0_| 0_|_MergeScanExec: REDACTED
|
||||
|_|_|_|
|
||||
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[2000], interval=[1000], time index=[j] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|
||||
|_|_|_SortExec: expr=[k@2 ASC], preserve_partitioning=[true] REDACTED
|
||||
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|
||||
|_|_|_RepartitionExec: partitioning=Hash([k@2], 32), input_partitions=1 REDACTED
|
||||
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
|_|_|_|
|
||||
|_|_| Total rows: 4_|
|
||||
+-+-+-+
|
||||
@@ -62,11 +68,14 @@ TQL ANALYZE ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp
|
||||
+-+-+-+
|
||||
| stage | node | plan_|
|
||||
+-+-+-+
|
||||
| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|
||||
|_|_|_MergeScanExec: REDACTED
|
||||
| 0_| 0_|_MergeScanExec: REDACTED
|
||||
|_|_|_|
|
||||
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|
||||
|_|_|_SortExec: expr=[k@2 ASC], preserve_partitioning=[true] REDACTED
|
||||
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|
||||
|_|_|_RepartitionExec: partitioning=Hash([k@2], 32), input_partitions=1 REDACTED
|
||||
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
|_|_|_|
|
||||
|_|_| Total rows: 4_|
|
||||
+-+-+-+
|
||||
@@ -85,11 +94,14 @@ TQL ANALYZE VERBOSE (0, 10, '5s') test;
|
||||
+-+-+-+
|
||||
| stage | node | plan_|
|
||||
+-+-+-+
|
||||
| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|
||||
|_|_|_MergeScanExec: REDACTED
|
||||
| 0_| 0_|_MergeScanExec: REDACTED
|
||||
|_|_|_|
|
||||
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries, projection=["i", "j", "k"], filters=[j >= TimestampMillisecond(-300000, None), j <= TimestampMillisecond(310000, None)], REDACTED
|
||||
| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|
||||
|_|_|_SortExec: expr=[k@2 ASC], preserve_partitioning=[true] REDACTED
|
||||
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|
||||
|_|_|_RepartitionExec: partitioning=Hash([k@2], 32), input_partitions=1 REDACTED
|
||||
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries, projection=["i", "j", "k"], filters=[j >= TimestampMillisecond(-300000, None), j <= TimestampMillisecond(310000, None)], REDACTED
|
||||
|_|_|_|
|
||||
|_|_| Total rows: 4_|
|
||||
+-+-+-+
|
||||
@@ -114,13 +126,23 @@ TQL ANALYZE (0, 10, '5s') test;
|
||||
+-+-+-+
|
||||
| stage | node | plan_|
|
||||
+-+-+-+
|
||||
| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
|
||||
| 0_| 0_|_SortPreservingMergeExec: [k@2 ASC, l@3 ASC, j@1 ASC] REDACTED
|
||||
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC, j@1 ASC], preserve_partitioning=[true] REDACTED
|
||||
|_|_|_MergeScanExec: REDACTED
|
||||
|_|_|_|
|
||||
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
|
||||
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC], preserve_partitioning=[true] REDACTED
|
||||
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|
||||
|_|_|_RepartitionExec: partitioning=Hash([k@2, l@3], 32), input_partitions=1 REDACTED
|
||||
|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
|_|_|_|
|
||||
| 1_| 1_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
| 1_| 1_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
|
||||
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC], preserve_partitioning=[true] REDACTED
|
||||
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|
||||
|_|_|_RepartitionExec: partitioning=Hash([k@2, l@3], 32), input_partitions=1 REDACTED
|
||||
|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
|_|_|_|
|
||||
|_|_| Total rows: 0_|
|
||||
+-+-+-+
|
||||
@@ -144,9 +166,21 @@ TQL ANALYZE (0, 10, '5s') rate(test[10s]);
|
||||
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
|
||||
|_|_|_MergeScanExec: REDACTED
|
||||
|_|_|_|
|
||||
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
| 1_| 0_|_PromRangeManipulateExec: req range=[0..10000], interval=[5000], eval range=[10000], time index=[j] REDACTED
|
||||
|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
|
||||
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC], preserve_partitioning=[true] REDACTED
|
||||
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|
||||
|_|_|_RepartitionExec: partitioning=Hash([k@2, l@3], 32), input_partitions=1 REDACTED
|
||||
|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
|_|_|_|
|
||||
| 1_| 1_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
| 1_| 1_|_PromRangeManipulateExec: req range=[0..10000], interval=[5000], eval range=[10000], time index=[j] REDACTED
|
||||
|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
|
||||
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC], preserve_partitioning=[true] REDACTED
|
||||
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|
||||
|_|_|_RepartitionExec: partitioning=Hash([k@2, l@3], 32), input_partitions=1 REDACTED
|
||||
|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
|_|_|_|
|
||||
|_|_| Total rows: 0_|
|
||||
+-+-+-+
|
||||
|
||||
@@ -12,18 +12,13 @@ Affected Rows: 3
|
||||
-- SQLNESS REPLACE (peers.*) REDACTED
|
||||
TQL EXPLAIN (0, 10, '5s') test;
|
||||
|
||||
+---------------+-----------------------------------------------------------------------------------------------+
|
||||
| plan_type | plan |
|
||||
+---------------+-----------------------------------------------------------------------------------------------+
|
||||
| logical_plan | PromInstantManipulate: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
|
||||
| | PromSeriesDivide: tags=["k"] |
|
||||
| | Projection: test.i, test.j, test.k |
|
||||
| | MergeScan [is_placeholder=false] |
|
||||
| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
|
||||
| | PromSeriesDivideExec: tags=["k"] |
|
||||
| | MergeScanExec: REDACTED
|
||||
| | |
|
||||
+---------------+-----------------------------------------------------------------------------------------------+
|
||||
+---------------+-------------------------------------------------+
|
||||
| plan_type | plan |
|
||||
+---------------+-------------------------------------------------+
|
||||
| logical_plan | MergeScan [is_placeholder=false] |
|
||||
| physical_plan | MergeScanExec: REDACTED
|
||||
| | |
|
||||
+---------------+-------------------------------------------------+
|
||||
|
||||
-- 'lookback' parameter is not fully supported, the test has to be updated
|
||||
-- explain at 0s, 5s and 10s. No point at 0s.
|
||||
@@ -31,36 +26,26 @@ TQL EXPLAIN (0, 10, '5s') test;
-- SQLNESS REPLACE (peers.*) REDACTED
TQL EXPLAIN (0, 10, '1s', '2s') test;

+---------------+---------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+---------------------------------------------------------------------------------------------+
| logical_plan | PromInstantManipulate: range=[0..0], lookback=[2000], interval=[300000], time index=[j] |
| | PromSeriesDivide: tags=["k"] |
| | Projection: test.i, test.j, test.k |
| | MergeScan [is_placeholder=false] |
| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[2000], interval=[300000], time index=[j] |
| | PromSeriesDivideExec: tags=["k"] |
| | MergeScanExec: REDACTED
| | |
+---------------+---------------------------------------------------------------------------------------------+
+---------------+-------------------------------------------------+
| plan_type | plan |
+---------------+-------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false] |
| physical_plan | MergeScanExec: REDACTED
| | |
+---------------+-------------------------------------------------+

-- explain at 0s, 5s and 10s. No point at 0s.
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (peers.*) REDACTED
TQL EXPLAIN ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp + '10 seconds'::interval, '5s') test;

+---------------+-----------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+-----------------------------------------------------------------------------------------------+
| logical_plan | PromInstantManipulate: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
| | PromSeriesDivide: tags=["k"] |
| | Projection: test.i, test.j, test.k |
| | MergeScan [is_placeholder=false] |
| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
| | PromSeriesDivideExec: tags=["k"] |
| | MergeScanExec: REDACTED
| | |
+---------------+-----------------------------------------------------------------------------------------------+
+---------------+-------------------------------------------------+
| plan_type | plan |
+---------------+-------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false] |
| physical_plan | MergeScanExec: REDACTED
| | |
+---------------+-------------------------------------------------+

-- explain verbose at 0s, 5s and 10s. No point at 0s.
-- SQLNESS REPLACE (-+) -
@@ -85,9 +70,7 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test;
| logical_plan after expand_wildcard_rule_| SAME TEXT AS ABOVE_|
| logical_plan after resolve_grouping_function_| SAME TEXT AS ABOVE_|
| logical_plan after type_coercion_| SAME TEXT AS ABOVE_|
| logical_plan after DistPlannerAnalyzer_| PromInstantManipulate: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesDivide: tags=["k"]_|
|_|_Projection: test.i, test.j, test.k_|
| logical_plan after DistPlannerAnalyzer_| Projection: test.i, test.j, test.k_|
|_|_MergeScan [is_placeholder=false]_|
| analyzed_logical_plan_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_nested_union_| SAME TEXT AS ABOVE_|
@@ -114,37 +97,45 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test;
| logical_plan after unwrap_cast_in_comparison_| SAME TEXT AS ABOVE_|
| logical_plan after common_sub_expression_eliminate_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_group_by_constant_| SAME TEXT AS ABOVE_|
| logical_plan after optimize_projections_| MergeScan [is_placeholder=false]_|
| logical_plan after ScanHintRule_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_nested_union_| SAME TEXT AS ABOVE_|
| logical_plan after simplify_expressions_| SAME TEXT AS ABOVE_|
| logical_plan after unwrap_cast_in_comparison_| SAME TEXT AS ABOVE_|
| logical_plan after replace_distinct_aggregate_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_join_| SAME TEXT AS ABOVE_|
| logical_plan after decorrelate_predicate_subquery_| SAME TEXT AS ABOVE_|
| logical_plan after scalar_subquery_to_join_| SAME TEXT AS ABOVE_|
| logical_plan after extract_equijoin_predicate_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_duplicated_expr_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_filter_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_cross_join_| SAME TEXT AS ABOVE_|
| logical_plan after common_sub_expression_eliminate_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_limit_| SAME TEXT AS ABOVE_|
| logical_plan after propagate_empty_relation_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_one_union_| SAME TEXT AS ABOVE_|
| logical_plan after filter_null_join_keys_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_outer_join_| SAME TEXT AS ABOVE_|
| logical_plan after push_down_limit_| SAME TEXT AS ABOVE_|
| logical_plan after push_down_filter_| SAME TEXT AS ABOVE_|
| logical_plan after single_distinct_aggregation_to_group_by | SAME TEXT AS ABOVE_|
| logical_plan after simplify_expressions_| SAME TEXT AS ABOVE_|
| logical_plan after unwrap_cast_in_comparison_| SAME TEXT AS ABOVE_|
| logical_plan after common_sub_expression_eliminate_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_group_by_constant_| SAME TEXT AS ABOVE_|
| logical_plan after optimize_projections_| SAME TEXT AS ABOVE_|
| logical_plan after ScanHintRule_| SAME TEXT AS ABOVE_|
| logical_plan_| PromInstantManipulate: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesDivide: tags=["k"]_|
|_|_Projection: test.i, test.j, test.k_|
|_|_MergeScan [is_placeholder=false]_|
| initial_physical_plan_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesDivideExec: tags=["k"]_|
|_|_ProjectionExec: expr=[i@0 as i, j@1 as j, k@2 as k]_|
|_|_MergeScanExec: REDACTED
| logical_plan_| MergeScan [is_placeholder=false]_|
| initial_physical_plan_| MergeScanExec: REDACTED
|_|_|
| initial_physical_plan_with_stats_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j], statistics=[Rows=Inexact(0), Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]] |
|_|_PromSeriesDivideExec: tags=["k"], statistics=[Rows=Absent, Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]]_|
|_|_ProjectionExec: expr=[i@0 as i, j@1 as j, k@2 as k], statistics=[Rows=Absent, Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]]_|
|_|_MergeScanExec: REDACTED
| initial_physical_plan_with_stats_| MergeScanExec: REDACTED
|_|_|
| initial_physical_plan_with_schema_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j], schema=[i:Float64;N, j:Timestamp(Millisecond, None), k:Utf8;N]_|
|_|_PromSeriesDivideExec: tags=["k"], schema=[i:Float64;N, j:Timestamp(Millisecond, None), k:Utf8;N]_|
|_|_ProjectionExec: expr=[i@0 as i, j@1 as j, k@2 as k], schema=[i:Float64;N, j:Timestamp(Millisecond, None), k:Utf8;N]_|
|_|_MergeScanExec: REDACTED
| initial_physical_plan_with_schema_| MergeScanExec: REDACTED
|_|_|
| physical_plan after parallelize_scan_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesDivideExec: tags=["k"]_|
|_|_ProjectionExec: expr=[i@0 as i, j@1 as j, k@2 as k]_|
|_|_MergeScanExec: REDACTED
| physical_plan after parallelize_scan_| MergeScanExec: REDACTED
|_|_|
| physical_plan after PassDistributionRule_| SAME TEXT AS ABOVE_|
| physical_plan after OutputRequirements_| OutputRequirementExec_|
|_|_PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesDivideExec: tags=["k"]_|
|_|_ProjectionExec: expr=[i@0 as i, j@1 as j, k@2 as k]_|
|_|_MergeScanExec: REDACTED
|_|_|
| physical_plan after aggregate_statistics_| SAME TEXT AS ABOVE_|
@@ -154,15 +145,9 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test;
| physical_plan after CombinePartialFinalAggregate_| SAME TEXT AS ABOVE_|
| physical_plan after EnforceSorting_| SAME TEXT AS ABOVE_|
| physical_plan after OptimizeAggregateOrder_| SAME TEXT AS ABOVE_|
| physical_plan after ProjectionPushdown_| OutputRequirementExec_|
|_|_PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesDivideExec: tags=["k"]_|
|_|_MergeScanExec: REDACTED
|_|_|
| physical_plan after ProjectionPushdown_| SAME TEXT AS ABOVE_|
| physical_plan after coalesce_batches_| SAME TEXT AS ABOVE_|
| physical_plan after OutputRequirements_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesDivideExec: tags=["k"]_|
|_|_MergeScanExec: REDACTED
| physical_plan after OutputRequirements_| MergeScanExec: REDACTED
|_|_|
| physical_plan after LimitAggregation_| SAME TEXT AS ABOVE_|
| physical_plan after ProjectionPushdown_| SAME TEXT AS ABOVE_|
@@ -171,17 +156,11 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test;
| physical_plan after MatchesConstantTerm_| SAME TEXT AS ABOVE_|
| physical_plan after RemoveDuplicateRule_| SAME TEXT AS ABOVE_|
| physical_plan after SanityCheckPlan_| SAME TEXT AS ABOVE_|
| physical_plan_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesDivideExec: tags=["k"]_|
|_|_MergeScanExec: REDACTED
| physical_plan_| MergeScanExec: REDACTED
|_|_|
| physical_plan_with_stats_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j], statistics=[Rows=Inexact(0), Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]] |
|_|_PromSeriesDivideExec: tags=["k"], statistics=[Rows=Absent, Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]]_|
|_|_MergeScanExec: REDACTED
| physical_plan_with_stats_| MergeScanExec: REDACTED
|_|_|
| physical_plan_with_schema_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j], schema=[i:Float64;N, j:Timestamp(Millisecond, None), k:Utf8;N]_|
|_|_PromSeriesDivideExec: tags=["k"], schema=[i:Float64;N, j:Timestamp(Millisecond, None), k:Utf8;N]_|
|_|_MergeScanExec: REDACTED
| physical_plan_with_schema_| MergeScanExec: REDACTED
|_|_|
+-+-+

164
tests/cases/standalone/common/tql/partition.result
Normal file
@@ -0,0 +1,164 @@
-- no partition
create table t (
i double,
j timestamp time index,
k string primary key
);

Affected Rows: 0

-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100);

+-+-+-+
| stage | node | plan_|
+-+-+-+
| 0_| 0_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_ProjectionExec: expr=[k@0 as k, j@1 as j, 100 - avg(prom_irate(j_range,i))@2 * 100 as Float64(100) - avg(prom_irate(j_range,i)) * Float64(100)] REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
|_|_|_SortPreservingMergeExec: [k@0 ASC NULLS LAST, j@1 ASC NULLS LAST] REDACTED
|_|_|_SortExec: expr=[k@0 ASC NULLS LAST, j@1 ASC NULLS LAST], preserve_partitioning=[true] REDACTED
|_|_|_AggregateExec: mode=FinalPartitioned, gby=[k@0 as k, j@1 as j], aggr=[avg(prom_irate(j_range,i))] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@0, j@1], 32), input_partitions=32 REDACTED
|_|_|_AggregateExec: mode=Partial, gby=[k@2 as k, j@0 as j], aggr=[avg(prom_irate(j_range,i))] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: prom_irate(j_range,i)@1 IS NOT NULL REDACTED
|_|_|_ProjectionExec: expr=[j@1 as j, prom_irate(j_range@3, i@0) as prom_irate(j_range,i), k@2 as k] REDACTED
|_|_|_PromRangeManipulateExec: req range=[0..10000], interval=[1000], eval range=[60000], time index=[j] REDACTED
|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|_|_|_SortExec: expr=[k@2 ASC], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@2], 32), input_partitions=1 REDACTED
|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
|_|_| Total rows: 0_|
+-+-+-+

drop table t;

Affected Rows: 0

-- partition on tag
create table t (
i double,
j timestamp time index,
k string,
l string,
primary key (k, l)
) partition on columns (k, l) (k < 'a', k >= 'a');

Affected Rows: 0

-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100);

+-+-+-+
| stage | node | plan_|
+-+-+-+
| 0_| 0_|_ProjectionExec: expr=[k@0 as k, j@1 as j, 100 - avg(prom_irate(j_range,i))@2 * 100 as Float64(100) - avg(prom_irate(j_range,i)) * Float64(100)] REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
|_|_|_SortPreservingMergeExec: [k@0 ASC NULLS LAST, j@1 ASC NULLS LAST] REDACTED
|_|_|_SortExec: expr=[k@0 ASC NULLS LAST, j@1 ASC NULLS LAST], preserve_partitioning=[true] REDACTED
|_|_|_AggregateExec: mode=FinalPartitioned, gby=[k@0 as k, j@1 as j], aggr=[avg(prom_irate(j_range,i))], ordering_mode=PartiallySorted([0]) REDACTED
|_|_|_SortExec: expr=[k@0 ASC NULLS LAST], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@0, j@1], 32), input_partitions=32 REDACTED
|_|_|_AggregateExec: mode=Partial, gby=[k@2 as k, j@0 as j], aggr=[avg(prom_irate(j_range,i))], ordering_mode=PartiallySorted([0]) REDACTED
|_|_|_ProjectionExec: expr=[j@0 as j, prom_irate(j_range,i)@1 as prom_irate(j_range,i), k@2 as k] REDACTED
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC, j@0 ASC], preserve_partitioning=[true] REDACTED
|_|_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: prom_irate(j_range,i)@1 IS NOT NULL REDACTED
|_|_|_ProjectionExec: expr=[j@1 as j, prom_irate(j_range@4, i@0) as prom_irate(j_range,i), k@2 as k, l@3 as l] REDACTED
|_|_|_PromRangeManipulateExec: req range=[0..10000], interval=[1000], eval range=[60000], time index=[j] REDACTED
|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@2, l@3], 32), input_partitions=1 REDACTED
|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
| 1_| 1_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: prom_irate(j_range,i)@1 IS NOT NULL REDACTED
|_|_|_ProjectionExec: expr=[j@1 as j, prom_irate(j_range@4, i@0) as prom_irate(j_range,i), k@2 as k, l@3 as l] REDACTED
|_|_|_PromRangeManipulateExec: req range=[0..10000], interval=[1000], eval range=[60000], time index=[j] REDACTED
|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@2, l@3], 32), input_partitions=1 REDACTED
|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
|_|_| Total rows: 0_|
+-+-+-+

drop table t;

Affected Rows: 0

-- partition on value
create table t (
i double,
j timestamp time index,
k string,
l string,
primary key (k, l)
) partition on columns (i) (i < 1.0, i >= 1.0);

Affected Rows: 0

-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100);

+-+-+-+
| stage | node | plan_|
+-+-+-+
| 0_| 0_|_ProjectionExec: expr=[k@0 as k, j@1 as j, 100 - avg(prom_irate(j_range,i))@2 * 100 as Float64(100) - avg(prom_irate(j_range,i)) * Float64(100)] REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
|_|_|_SortPreservingMergeExec: [k@0 ASC NULLS LAST, j@1 ASC NULLS LAST] REDACTED
|_|_|_SortExec: expr=[k@0 ASC NULLS LAST, j@1 ASC NULLS LAST], preserve_partitioning=[true] REDACTED
|_|_|_AggregateExec: mode=FinalPartitioned, gby=[k@0 as k, j@1 as j], aggr=[avg(prom_irate(j_range,i))] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@0, j@1], 32), input_partitions=32 REDACTED
|_|_|_AggregateExec: mode=Partial, gby=[k@2 as k, j@0 as j], aggr=[avg(prom_irate(j_range,i))] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: prom_irate(j_range,i)@1 IS NOT NULL REDACTED
|_|_|_ProjectionExec: expr=[j@1 as j, prom_irate(j_range@4, i@0) as prom_irate(j_range,i), k@2 as k] REDACTED
|_|_|_PromRangeManipulateExec: req range=[0..10000], interval=[1000], eval range=[60000], time index=[j] REDACTED
|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@2, l@3], 32), input_partitions=32 REDACTED
|_|_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
| 1_| 1_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
|_|_| Total rows: 0_|
+-+-+-+

drop table t;

Affected Rows: 0

54
tests/cases/standalone/common/tql/partition.sql
Normal file
@@ -0,0 +1,54 @@
-- no partition
create table t (
i double,
j timestamp time index,
k string primary key
);

-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100);

drop table t;

-- partition on tag
create table t (
i double,
j timestamp time index,
k string,
l string,
primary key (k, l)
) partition on columns (k, l) (k < 'a', k >= 'a');

-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100);

drop table t;

-- partition on value
create table t (
i double,
j timestamp time index,
k string,
l string,
primary key (k, l)
) partition on columns (i) (i < 1.0, i >= 1.0);

-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100);

drop table t;