Compare commits

...

12 Commits

Author SHA1 Message Date
Weny Xu
5a99f098c5 test: add tests for region migration procedure (#2857)
* feat: add backward compatibility test for persistent ctx

* refactor: refactor State of region migration

* feat: add test utils for region migration tests

* test: add simple region migration tests

* chore: apply suggestions from CR
2023-12-08 08:47:09 +00:00
Ruihang Xia
7cf9945161 fix: re-enable ignored case test_query_prepared (#2892)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-12-08 08:35:56 +00:00
tison
bfb4794cfa fix: handle heartbeat shutdown gracefully (#2886)
* fix: handle heartbeat shutdown gracefully

Signed-off-by: tison <wander4096@gmail.com>

* improve logging

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2023-12-08 03:59:05 +00:00
Ruihang Xia
58183fe72f fix: align linear_regression to PromQL's behavior (#2879)
* fix: accept f64 and i64 as predict_linear's param

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use second instead of millisecond

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add test to linear_regression

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-12-08 02:41:10 +00:00
Niwaka
09aa4b72a5 chore: update storage config example (#2887)
chore: update config example
2023-12-07 03:18:56 +00:00
dennis zhuang
43f32f4499 feat: impl date_add/date_sub functions (#2881)
* feat: adds date_add and date_sub function

* test: add date function

* fix: adds interval to date returns wrong result

* fix: header

* fix: typo

* fix: timestamp resolution

* fix: capacity

* chore: apply suggestion

* fix: wrong behavior when adding intervals to timestamp, date and datetime

* chore: remove unused error

* test: refactor and add some tests
2023-12-07 03:02:15 +00:00
tison
ea80570cb1 fix: mysql version function result (#2884)
Signed-off-by: tison <wander4096@gmail.com>
2023-12-06 16:14:09 +00:00
Niwaka
cfe3a2c55e feat!: support table ddl for custom storage (#2733)
* feat: support table ddl for custom_storage

* refactor: rename extract_variant_name to name

* chore: add blank

* chore: keep compatible

* feat: rename custom_stores to providers

* chore: rename

* chore: config

* refactor: add should_retry in client Error

* fix: test fail

* chore: remove unused options

* chore: remove unused import

* chore: remove the blanks.

* chore: revert

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-12-06 15:59:01 +00:00
Ruihang Xia
2cca267a32 chore: tweak status code of promql errors (#2883)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-12-06 13:50:53 +00:00
tison
f74715ce52 refactor: RegionEngine::handle_request always returns affected rows (#2874)
* refactor: RegionEngine::handle_request -> handle_execution

Signed-off-by: tison <wander4096@gmail.com>

* propagate refactor

Signed-off-by: tison <wander4096@gmail.com>

* revert spell change

Signed-off-by: tison <wander4096@gmail.com>

* propagate refactor

Signed-off-by: tison <wander4096@gmail.com>

* cargo clippy

Signed-off-by: tison <wander4096@gmail.com>

* propagate refactor

Signed-off-by: tison <wander4096@gmail.com>

* cargo fmt

Signed-off-by: tison <wander4096@gmail.com>

* more name clarification

Signed-off-by: tison <wander4096@gmail.com>

* revert rename

Signed-off-by: tison <wander4096@gmail.com>

* wrap affected rows into RegionResponse

Signed-off-by: tison <wander4096@gmail.com>

* flatten return AffectedRows

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2023-12-06 13:27:19 +00:00
Weny Xu
1141dbe946 chore: unify the meta metrics styling (#2875)
* chore: unify the meta metrics styling

* chore: apply suggestions from CR
2023-12-06 09:20:41 +00:00
ZonaHe
a415685bf1 feat: update dashboard to v0.4.2 (#2882)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2023-12-06 09:13:02 +00:00
86 changed files with 2492 additions and 405 deletions

View File

@@ -43,6 +43,7 @@ sync_write = false
[storage]
# The working home directory.
data_home = "/tmp/greptimedb/"
# Storage type.
type = "File"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"
@@ -53,6 +54,12 @@ type = "File"
# The local file cache capacity in bytes.
# cache_capacity = "256MB"
# Custom storage options
#[[storage.providers]]
#type = "S3"
#[[storage.providers]]
#type = "Gcs"
# Mito engine options
[[region_engine]]
[region_engine.mito]

View File

@@ -122,6 +122,12 @@ type = "File"
# The local file cache capacity in bytes.
# cache_capacity = "256MB"
# Custom storage options
#[[storage.providers]]
#type = "S3"
#[[storage.providers]]
#type = "Gcs"
# Mito engine options
[[region_engine]]
[region_engine.mito]

View File

@@ -131,3 +131,15 @@ impl From<Status> for Error {
Self::Server { code, msg }
}
}
impl Error {
pub fn should_retry(&self) -> bool {
!matches!(
self,
Self::RegionServer {
code: Code::InvalidArgument,
..
}
)
}
}

View File

@@ -29,7 +29,6 @@ use prost::Message;
use snafu::{location, Location, OptionExt, ResultExt};
use tokio_stream::StreamExt;
use crate::error::Error::RegionServer;
use crate::error::{
self, ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
MissingFieldSnafu, Result, ServerSnafu,
@@ -45,7 +44,7 @@ pub struct RegionRequester {
impl Datanode for RegionRequester {
async fn handle(&self, request: RegionRequest) -> MetaResult<AffectedRows> {
self.handle_inner(request).await.map_err(|err| {
if matches!(err, RegionServer { .. }) {
if err.should_retry() {
meta_error::Error::RetryLater {
source: BoxedError::new(err),
}

View File

@@ -214,7 +214,7 @@ mod tests {
use std::time::Duration;
use common_test_util::temp_dir::create_named_temp_file;
use datanode::config::{FileConfig, ObjectStoreConfig};
use datanode::config::{FileConfig, GcsConfig, ObjectStoreConfig, S3Config};
use servers::heartbeat_options::HeartbeatOptions;
use servers::Mode;
@@ -251,8 +251,17 @@ mod tests {
sync_write = false
[storage]
type = "File"
data_home = "/tmp/greptimedb/"
type = "File"
[[storage.providers]]
type = "Gcs"
bucket = "foo"
endpoint = "bar"
[[storage.providers]]
type = "S3"
bucket = "foo"
[logging]
level = "debug"
@@ -305,6 +314,15 @@ mod tests {
&options.storage.store,
ObjectStoreConfig::File(FileConfig { .. })
));
assert_eq!(options.storage.providers.len(), 2);
assert!(matches!(
options.storage.providers[0],
ObjectStoreConfig::Gcs(GcsConfig { .. })
));
assert!(matches!(
options.storage.providers[1],
ObjectStoreConfig::S3(S3Config { .. })
));
assert_eq!("debug", options.logging.level.unwrap());
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);

View File

@@ -238,7 +238,7 @@ mod tests {
.unwrap();
// Check the configs from environment variables.
match opts.storage.store {
match &opts.storage.store {
ObjectStoreConfig::S3(s3_config) => {
assert_eq!(s3_config.bucket, "mybucket".to_string());
}

View File

@@ -426,6 +426,7 @@ mod tests {
use auth::{Identity, Password, UserProviderRef};
use common_base::readable_size::ReadableSize;
use common_test_util::temp_dir::create_named_temp_file;
use datanode::config::{FileConfig, GcsConfig};
use servers::Mode;
use super::*;
@@ -473,8 +474,16 @@ mod tests {
purge_interval = "10m"
read_batch_size = 128
sync_write = false
[storage]
data_home = "/tmp/greptimedb/"
type = "File"
[[storage.providers]]
type = "Gcs"
bucket = "foo"
endpoint = "bar"
[[storage.providers]]
type = "S3"
access_key_id = "access_key_id"
secret_access_key = "secret_access_key"
@@ -524,7 +533,16 @@ mod tests {
assert_eq!("/tmp/greptimedb/test/wal", dn_opts.wal.dir.unwrap());
match &dn_opts.storage.store {
assert!(matches!(
&dn_opts.storage.store,
datanode::config::ObjectStoreConfig::File(FileConfig { .. })
));
assert_eq!(dn_opts.storage.providers.len(), 2);
assert!(matches!(
dn_opts.storage.providers[0],
datanode::config::ObjectStoreConfig::Gcs(GcsConfig { .. })
));
match &dn_opts.storage.providers[1] {
datanode::config::ObjectStoreConfig::S3(s3_config) => {
assert_eq!(
"Secret([REDACTED alloc::string::String])".to_string(),

View File

@@ -0,0 +1,29 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_query::prelude::{Signature, TypeSignature, Volatility};
use datatypes::prelude::ConcreteDataType;
/// Creates a `OneOf` function signature from the cross product of two argument lists.
pub fn one_of_sigs2(args1: Vec<ConcreteDataType>, args2: Vec<ConcreteDataType>) -> Signature {
let mut sigs = Vec::with_capacity(args1.len() * args2.len());
for arg1 in &args1 {
for arg2 in &args2 {
sigs.push(TypeSignature::Exact(vec![arg1.clone(), arg2.clone()]));
}
}
Signature::one_of(sigs, Volatility::Immutable)
}
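
For reference, `one_of_sigs2` emits one `TypeSignature::Exact` per (arg1, arg2) pair, so the six date/time types crossed with the three interval types registered by `date_add`/`date_sub` below yield the 18 signatures their tests assert. A minimal sketch (a hypothetical call, reusing only types from this diff):

// Two date/time types crossed with one interval type: 2 * 1 = 2 exact signatures.
let sig = one_of_sigs2(
    vec![
        ConcreteDataType::date_datatype(),
        ConcreteDataType::timestamp_second_datatype(),
    ],
    vec![ConcreteDataType::interval_year_month_datatype()],
);
// `sig` is a OneOf signature holding both (Date, IntervalYearMonth)
// and (TimestampSecond, IntervalYearMonth).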

View File

@@ -13,3 +13,5 @@
// limitations under the License.
pub mod scalars;
pub mod helper;

View File

@@ -13,6 +13,7 @@
// limitations under the License.
pub mod aggregate;
mod date;
pub mod expression;
pub mod function;
pub mod function_registry;

View File

@@ -0,0 +1,31 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
mod date_add;
mod date_sub;
use date_add::DateAddFunction;
use date_sub::DateSubFunction;
use crate::scalars::function_registry::FunctionRegistry;
pub(crate) struct DateFunction;
impl DateFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(DateAddFunction));
registry.register(Arc::new(DateSubFunction));
}
}

View File

@@ -0,0 +1,279 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
use common_query::prelude::Signature;
use datatypes::data_type::DataType;
use datatypes::prelude::ConcreteDataType;
use datatypes::value::ValueRef;
use datatypes::vectors::VectorRef;
use snafu::ensure;
use crate::helper;
use crate::scalars::function::{Function, FunctionContext};
/// A function that adds an interval value to a Timestamp, Date, or DateTime, and returns the result.
#[derive(Clone, Debug, Default)]
pub struct DateAddFunction;
const NAME: &str = "date_add";
impl Function for DateAddFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(input_types[0].clone())
}
fn signature(&self) -> Signature {
helper::one_of_sigs2(
vec![
ConcreteDataType::date_datatype(),
ConcreteDataType::datetime_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
],
vec![
ConcreteDataType::interval_month_day_nano_datatype(),
ConcreteDataType::interval_year_month_datatype(),
ConcreteDataType::interval_day_time_datatype(),
],
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 2,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect 2, have: {}",
columns.len()
),
}
);
let left = &columns[0];
let right = &columns[1];
let size = left.len();
let left_datatype = columns[0].data_type();
match left_datatype {
ConcreteDataType::Timestamp(_) => {
let mut result = left_datatype.create_mutable_vector(size);
for i in 0..size {
let ts = left.get(i).as_timestamp();
let interval = right.get(i).as_interval();
let new_ts = match (ts, interval) {
(Some(ts), Some(interval)) => ts.add_interval(interval),
_ => ts,
};
result.push_value_ref(ValueRef::from(new_ts));
}
Ok(result.to_vector())
}
ConcreteDataType::Date(_) => {
let mut result = left_datatype.create_mutable_vector(size);
for i in 0..size {
let date = left.get(i).as_date();
let interval = right.get(i).as_interval();
let new_date = match (date, interval) {
(Some(date), Some(interval)) => date.add_interval(interval),
_ => date,
};
result.push_value_ref(ValueRef::from(new_date));
}
Ok(result.to_vector())
}
ConcreteDataType::DateTime(_) => {
let mut result = left_datatype.create_mutable_vector(size);
for i in 0..size {
let datetime = left.get(i).as_datetime();
let interval = right.get(i).as_interval();
let new_datetime = match (datetime, interval) {
(Some(datetime), Some(interval)) => datetime.add_interval(interval),
_ => datetime,
};
result.push_value_ref(ValueRef::from(new_datetime));
}
Ok(result.to_vector())
}
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
}
.fail(),
}
}
}
impl fmt::Display for DateAddFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "DATE_ADD")
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use common_query::prelude::{TypeSignature, Volatility};
use datatypes::prelude::ConcreteDataType;
use datatypes::value::Value;
use datatypes::vectors::{
DateTimeVector, DateVector, IntervalDayTimeVector, IntervalYearMonthVector,
TimestampSecondVector,
};
use super::{DateAddFunction, *};
use crate::scalars::Function;
#[test]
fn test_date_add_misc() {
let f = DateAddFunction;
assert_eq!("date_add", f.name());
assert_eq!(
ConcreteDataType::timestamp_microsecond_datatype(),
f.return_type(&[ConcreteDataType::timestamp_microsecond_datatype()])
.unwrap()
);
assert_eq!(
ConcreteDataType::timestamp_second_datatype(),
f.return_type(&[ConcreteDataType::timestamp_second_datatype()])
.unwrap()
);
assert_eq!(
ConcreteDataType::date_datatype(),
f.return_type(&[ConcreteDataType::date_datatype()]).unwrap()
);
assert_eq!(
ConcreteDataType::datetime_datatype(),
f.return_type(&[ConcreteDataType::datetime_datatype()])
.unwrap()
);
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::OneOf(sigs),
volatility: Volatility::Immutable
} if sigs.len() == 18));
}
#[test]
fn test_timestamp_date_add() {
let f = DateAddFunction;
let times = vec![Some(123), None, Some(42), None];
// Intervals in milliseconds
let intervals = vec![1000, 2000, 3000, 1000];
let results = [Some(124), None, Some(45), None];
let time_vector = TimestampSecondVector::from(times.clone());
let interval_vector = IntervalDayTimeVector::from_vec(intervals);
let args: Vec<VectorRef> = vec![Arc::new(time_vector), Arc::new(interval_vector)];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in times.iter().enumerate() {
let v = vector.get(i);
let result = results.get(i).unwrap();
if result.is_none() {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Timestamp(ts) => {
assert_eq!(ts.value(), result.unwrap());
}
_ => unreachable!(),
}
}
}
#[test]
fn test_date_date_add() {
let f = DateAddFunction;
let dates = vec![Some(123), None, Some(42), None];
// Intervals in months
let intervals = vec![1, 2, 3, 1];
let results = [Some(154), None, Some(131), None];
let date_vector = DateVector::from(dates.clone());
let interval_vector = IntervalYearMonthVector::from_vec(intervals);
let args: Vec<VectorRef> = vec![Arc::new(date_vector), Arc::new(interval_vector)];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in dates.iter().enumerate() {
let v = vector.get(i);
let result = results.get(i).unwrap();
if result.is_none() {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Date(date) => {
assert_eq!(date.val(), result.unwrap());
}
_ => unreachable!(),
}
}
}
#[test]
fn test_datetime_date_add() {
let f = DateAddFunction;
let dates = vec![Some(123), None, Some(42), None];
// Intervals in months
let intervals = vec![1, 2, 3, 1];
let results = [Some(2678400123), None, Some(7776000042), None];
let date_vector = DateTimeVector::from(dates.clone());
let interval_vector = IntervalYearMonthVector::from_vec(intervals);
let args: Vec<VectorRef> = vec![Arc::new(date_vector), Arc::new(interval_vector)];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in dates.iter().enumerate() {
let v = vector.get(i);
let result = results.get(i).unwrap();
if result.is_none() {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::DateTime(date) => {
assert_eq!(date.val(), result.unwrap());
}
_ => unreachable!(),
}
}
}
}

View File

@@ -0,0 +1,292 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
use common_query::prelude::Signature;
use datatypes::data_type::DataType;
use datatypes::prelude::ConcreteDataType;
use datatypes::value::ValueRef;
use datatypes::vectors::VectorRef;
use snafu::ensure;
use crate::helper;
use crate::scalars::function::{Function, FunctionContext};
/// A function that subtracts an interval value from a Timestamp, Date, or DateTime, and returns the result.
#[derive(Clone, Debug, Default)]
pub struct DateSubFunction;
const NAME: &str = "date_sub";
impl Function for DateSubFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(input_types[0].clone())
}
fn signature(&self) -> Signature {
helper::one_of_sigs2(
vec![
ConcreteDataType::date_datatype(),
ConcreteDataType::datetime_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
],
vec![
ConcreteDataType::interval_month_day_nano_datatype(),
ConcreteDataType::interval_year_month_datatype(),
ConcreteDataType::interval_day_time_datatype(),
],
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 2,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect 2, have: {}",
columns.len()
),
}
);
let left = &columns[0];
let right = &columns[1];
let size = left.len();
let left_datatype = columns[0].data_type();
match left_datatype {
ConcreteDataType::Timestamp(_) => {
let mut result = left_datatype.create_mutable_vector(size);
for i in 0..size {
let ts = left.get(i).as_timestamp();
let interval = right.get(i).as_interval();
let new_ts = match (ts, interval) {
(Some(ts), Some(interval)) => ts.sub_interval(interval),
_ => ts,
};
result.push_value_ref(ValueRef::from(new_ts));
}
Ok(result.to_vector())
}
ConcreteDataType::Date(_) => {
let mut result = left_datatype.create_mutable_vector(size);
for i in 0..size {
let date = left.get(i).as_date();
let interval = right.get(i).as_interval();
let new_date = match (date, interval) {
(Some(date), Some(interval)) => date.sub_interval(interval),
_ => date,
};
result.push_value_ref(ValueRef::from(new_date));
}
Ok(result.to_vector())
}
ConcreteDataType::DateTime(_) => {
let mut result = left_datatype.create_mutable_vector(size);
for i in 0..size {
let datetime = left.get(i).as_datetime();
let interval = right.get(i).as_interval();
let new_datetime = match (datetime, interval) {
(Some(datetime), Some(interval)) => datetime.sub_interval(interval),
_ => datetime,
};
result.push_value_ref(ValueRef::from(new_datetime));
}
Ok(result.to_vector())
}
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
}
.fail(),
}
}
}
impl fmt::Display for DateSubFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "DATE_SUB")
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use common_query::prelude::{TypeSignature, Volatility};
use datatypes::prelude::ConcreteDataType;
use datatypes::value::Value;
use datatypes::vectors::{
DateTimeVector, DateVector, IntervalDayTimeVector, IntervalYearMonthVector,
TimestampSecondVector,
};
use super::{DateSubFunction, *};
use crate::scalars::Function;
#[test]
fn test_date_sub_misc() {
let f = DateSubFunction;
assert_eq!("date_sub", f.name());
assert_eq!(
ConcreteDataType::timestamp_microsecond_datatype(),
f.return_type(&[ConcreteDataType::timestamp_microsecond_datatype()])
.unwrap()
);
assert_eq!(
ConcreteDataType::timestamp_second_datatype(),
f.return_type(&[ConcreteDataType::timestamp_second_datatype()])
.unwrap()
);
assert_eq!(
ConcreteDataType::date_datatype(),
f.return_type(&[ConcreteDataType::date_datatype()]).unwrap()
);
assert_eq!(
ConcreteDataType::datetime_datatype(),
f.return_type(&[ConcreteDataType::datetime_datatype()])
.unwrap()
);
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::OneOf(sigs),
volatility: Volatility::Immutable
} if sigs.len() == 18));
}
#[test]
fn test_timestamp_date_sub() {
let f = DateSubFunction;
let times = vec![Some(123), None, Some(42), None];
// Intervals in milliseconds
let intervals = vec![1000, 2000, 3000, 1000];
let results = [Some(122), None, Some(39), None];
let time_vector = TimestampSecondVector::from(times.clone());
let interval_vector = IntervalDayTimeVector::from_vec(intervals);
let args: Vec<VectorRef> = vec![Arc::new(time_vector), Arc::new(interval_vector)];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in times.iter().enumerate() {
let v = vector.get(i);
let result = results.get(i).unwrap();
if result.is_none() {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Timestamp(ts) => {
assert_eq!(ts.value(), result.unwrap());
}
_ => unreachable!(),
}
}
}
#[test]
fn test_date_date_sub() {
let f = DateSubFunction;
let days_per_month = 30;
let dates = vec![
Some(123 * days_per_month),
None,
Some(42 * days_per_month),
None,
];
// Intervals in months
let intervals = vec![1, 2, 3, 1];
let results = [Some(3659), None, Some(1168), None];
let date_vector = DateVector::from(dates.clone());
let interval_vector = IntervalYearMonthVector::from_vec(intervals);
let args: Vec<VectorRef> = vec![Arc::new(date_vector), Arc::new(interval_vector)];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in dates.iter().enumerate() {
let v = vector.get(i);
let result = results.get(i).unwrap();
if result.is_none() {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Date(date) => {
assert_eq!(date.val(), result.unwrap());
}
_ => unreachable!(),
}
}
}
#[test]
fn test_datetime_date_sub() {
let f = DateSubFunction;
let millis_per_month = 3600 * 24 * 30 * 1000;
let dates = vec![
Some(123 * millis_per_month),
None,
Some(42 * millis_per_month),
None,
];
// Intervals in months
let intervals = vec![1, 2, 3, 1];
let results = [Some(316137600000), None, Some(100915200000), None];
let date_vector = DateTimeVector::from(dates.clone());
let interval_vector = IntervalYearMonthVector::from_vec(intervals);
let args: Vec<VectorRef> = vec![Arc::new(date_vector), Arc::new(interval_vector)];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in dates.iter().enumerate() {
let v = vector.get(i);
let result = results.get(i).unwrap();
if result.is_none() {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::DateTime(date) => {
assert_eq!(date.val(), result.unwrap());
}
_ => unreachable!(),
}
}
}
}

View File

@@ -19,6 +19,7 @@ use std::sync::{Arc, RwLock};
use once_cell::sync::Lazy;
use crate::scalars::aggregate::{AggregateFunctionMetaRef, AggregateFunctions};
use crate::scalars::date::DateFunction;
use crate::scalars::function::FunctionRef;
use crate::scalars::math::MathFunction;
use crate::scalars::numpy::NumpyFunction;
@@ -75,6 +76,7 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
MathFunction::register(&function_registry);
NumpyFunction::register(&function_registry);
TimestampFunction::register(&function_registry);
DateFunction::register(&function_registry);
AggregateFunctions::register(&function_registry);

View File

@@ -56,6 +56,14 @@ impl TableRouteValue {
version: self.version + 1,
}
}
/// Returns the version.
///
/// For test purposes.
#[cfg(any(test, feature = "testing"))]
pub fn version(&self) -> u64 {
self.version
}
}
impl TableMetaKey for TableRouteKey {

View File

@@ -15,12 +15,13 @@
use std::fmt::{Display, Formatter};
use std::str::FromStr;
use chrono::{Datelike, NaiveDate};
use chrono::{Datelike, Days, Months, NaiveDate};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use snafu::ResultExt;
use crate::error::{Error, ParseDateStrSnafu, Result};
use crate::interval::Interval;
const UNIX_EPOCH_FROM_CE: i32 = 719_163;
@@ -86,6 +87,32 @@ impl Date {
pub fn to_secs(&self) -> i64 {
(self.0 as i64) * 24 * 3600
}
/// Adds the given Interval to the current date.
/// Returns None if the resulting date would be out of range.
pub fn add_interval(&self, interval: Interval) -> Option<Date> {
let naive_date = self.to_chrono_date()?;
let (months, days, _) = interval.to_month_day_nano();
naive_date
.checked_add_months(Months::new(months as u32))?
.checked_add_days(Days::new(days as u64))
.map(Into::into)
}
/// Subtracts the given Interval from the current date.
/// Returns None if the resulting date would be out of range.
pub fn sub_interval(&self, interval: Interval) -> Option<Date> {
let naive_date = self.to_chrono_date()?;
let (months, days, _) = interval.to_month_day_nano();
naive_date
.checked_sub_months(Months::new(months as u32))?
.checked_sub_days(Days::new(days as u64))
.map(Into::into)
}
}
#[cfg(test)]
@@ -124,6 +151,18 @@ mod tests {
assert_eq!(now, Date::from_str(&now).unwrap().to_string());
}
#[test]
fn test_add_sub_interval() {
let date = Date::new(1000);
let interval = Interval::from_year_month(3);
let new_date = date.add_interval(interval).unwrap();
assert_eq!(new_date.val(), 1091);
assert_eq!(date, new_date.sub_interval(interval).unwrap());
}
#[test]
pub fn test_min_max() {
let mut date = Date::from_str("9999-12-31").unwrap();

View File

@@ -14,14 +14,15 @@
use std::fmt::{Display, Formatter};
use std::str::FromStr;
use std::time::Duration;
use chrono::{LocalResult, NaiveDateTime, TimeZone as ChronoTimeZone, Utc};
use chrono::{Days, LocalResult, Months, NaiveDateTime, TimeZone as ChronoTimeZone, Utc};
use serde::{Deserialize, Serialize};
use crate::error::{Error, InvalidDateStrSnafu, Result};
use crate::timezone::TimeZone;
use crate::util::{format_utc_datetime, local_datetime_to_utc};
use crate::Date;
use crate::{Date, Interval};
const DATETIME_FORMAT: &str = "%F %T";
const DATETIME_FORMAT_WITH_TZ: &str = "%F %T%z";
@@ -117,6 +118,33 @@ impl DateTime {
None => Utc.from_utc_datetime(&v).naive_local(),
})
}
/// Adds the given Interval to the current datetime.
/// Returns None if the resulting datetime would be out of range.
pub fn add_interval(&self, interval: Interval) -> Option<Self> {
let naive_datetime = self.to_chrono_datetime()?;
let (months, days, nsecs) = interval.to_month_day_nano();
let naive_datetime = naive_datetime
.checked_add_months(Months::new(months as u32))?
.checked_add_days(Days::new(days as u64))?
+ Duration::from_nanos(nsecs as u64);
Some(naive_datetime.into())
}
/// Subtracts the given Interval from the current datetime.
/// Returns None if the resulting datetime would be out of range.
pub fn sub_interval(&self, interval: Interval) -> Option<Self> {
let naive_datetime = self.to_chrono_datetime()?;
let (months, days, nsecs) = interval.to_month_day_nano();
let naive_datetime = naive_datetime
.checked_sub_months(Months::new(months as u32))?
.checked_sub_days(Days::new(days as u64))?
- Duration::from_nanos(nsecs as u64);
Some(naive_datetime.into())
}
/// Convert to [common_time::date].
pub fn to_date(&self) -> Option<Date> {
@@ -152,6 +180,18 @@ mod tests {
assert_eq!(42, d.val());
}
#[test]
fn test_add_sub_interval() {
let datetime = DateTime::new(1000);
let interval = Interval::from_day_time(1, 200);
let new_datetime = datetime.add_interval(interval).unwrap();
assert_eq!(new_datetime.val(), 1000 + 3600 * 24 * 1000 + 200);
assert_eq!(datetime, new_datetime.sub_interval(interval).unwrap());
}
#[test]
fn test_parse_local_date_time() {
std::env::set_var("TZ", "Asia/Shanghai");

View File

@@ -20,6 +20,10 @@ use std::hash::{Hash, Hasher};
use arrow::datatypes::IntervalUnit as ArrowIntervalUnit;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use snafu::ResultExt;
use crate::duration::Duration;
use crate::error::{Result, TimestampOverflowSnafu};
#[derive(
Debug, Default, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize,
@@ -63,7 +67,7 @@ impl From<ArrowIntervalUnit> for IntervalUnit {
/// month-day-nano, which will be stored in the following format.
/// Interval data format:
/// | months | days | nsecs |
/// | 4bytes | 4bytes | 8bytes |
/// | 4bytes | 4bytes | 8bytes |
#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
pub struct Interval {
months: i32,
@@ -114,6 +118,14 @@ impl Interval {
}
}
pub fn to_duration(&self) -> Result<Duration> {
Ok(Duration::new_nanosecond(
self.to_nanosecond()
.try_into()
.context(TimestampOverflowSnafu)?,
))
}
/// Return a tuple(months, days, nanoseconds) from the interval.
pub fn to_month_day_nano(&self) -> (i32, i32, i64) {
(self.months, self.days, self.nsecs)
@@ -558,6 +570,7 @@ mod tests {
use std::collections::HashMap;
use super::*;
use crate::timestamp::TimeUnit;
#[test]
fn test_from_year_month() {
@@ -572,6 +585,21 @@ mod tests {
assert_eq!(interval.nsecs, 2_000_000);
}
#[test]
fn test_to_duration() {
let interval = Interval::from_day_time(1, 2);
let duration = interval.to_duration().unwrap();
assert_eq!(86400002000000, duration.value());
assert_eq!(TimeUnit::Nanosecond, duration.unit());
let interval = Interval::from_year_month(12);
let duration = interval.to_duration().unwrap();
assert_eq!(31104000000000000, duration.value());
assert_eq!(TimeUnit::Nanosecond, duration.unit());
}
#[test]
fn test_interval_is_positive() {
let interval = Interval::from_year_month(1);

View File

@@ -21,15 +21,16 @@ use std::time::Duration;
use arrow::datatypes::TimeUnit as ArrowTimeUnit;
use chrono::{
DateTime, LocalResult, NaiveDate, NaiveDateTime, NaiveTime, TimeZone as ChronoTimeZone, Utc,
DateTime, Days, LocalResult, Months, NaiveDate, NaiveDateTime, NaiveTime,
TimeZone as ChronoTimeZone, Utc,
};
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use crate::error;
use crate::error::{ArithmeticOverflowSnafu, Error, ParseTimestampSnafu, TimestampOverflowSnafu};
use crate::timezone::TimeZone;
use crate::util::{div_ceil, format_utc_datetime, local_datetime_to_utc};
use crate::{error, Interval};
/// Timestamp represents the value of units(seconds/milliseconds/microseconds/nanoseconds) elapsed
/// since UNIX epoch. The valid value range of [Timestamp] depends on it's unit (all in UTC time zone):
@@ -104,6 +105,78 @@ impl Timestamp {
})
}
/// Adds a duration to the timestamp.
/// # Note
/// The result time unit remains unchanged even if `duration` has a different unit from `self`.
/// For example, adding 1 millisecond to a timestamp of 1 second still yields 1 second,
/// because the duration is truncated to the timestamp's resolution before the addition.
pub fn add_duration(&self, duration: Duration) -> error::Result<Self> {
let duration: i64 = match self.unit {
TimeUnit::Second => {
i64::try_from(duration.as_secs()).context(TimestampOverflowSnafu)?
}
TimeUnit::Millisecond => {
i64::try_from(duration.as_millis()).context(TimestampOverflowSnafu)?
}
TimeUnit::Microsecond => {
i64::try_from(duration.as_micros()).context(TimestampOverflowSnafu)?
}
TimeUnit::Nanosecond => {
i64::try_from(duration.as_nanos()).context(TimestampOverflowSnafu)?
}
};
let value = self
.value
.checked_add(duration)
.with_context(|| ArithmeticOverflowSnafu {
msg: format!(
"Try to add timestamp: {:?} with duration: {:?}",
self, duration
),
})?;
Ok(Timestamp {
value,
unit: self.unit,
})
}
/// Adds the given Interval to the current timestamp.
/// Returns None if the resulting timestamp would be out of range.
pub fn add_interval(&self, interval: Interval) -> Option<Timestamp> {
let naive_datetime = self.to_chrono_datetime()?;
let (months, days, nsecs) = interval.to_month_day_nano();
let naive_datetime = naive_datetime
.checked_add_months(Months::new(months as u32))?
.checked_add_days(Days::new(days as u64))?
+ Duration::from_nanos(nsecs as u64);
match Timestamp::from_chrono_datetime(naive_datetime) {
// Have to convert the new timestamp by the current unit.
Some(ts) => ts.convert_to(self.unit),
None => None,
}
}
/// Subtracts the given Interval from the current timestamp.
/// Returns None if the resulting timestamp would be out of range.
pub fn sub_interval(&self, interval: Interval) -> Option<Timestamp> {
let naive_datetime = self.to_chrono_datetime()?;
let (months, days, nsecs) = interval.to_month_day_nano();
let naive_datetime = naive_datetime
.checked_sub_months(Months::new(months as u32))?
.checked_sub_days(Days::new(days as u64))?
- Duration::from_nanos(nsecs as u64);
match Timestamp::from_chrono_datetime(naive_datetime) {
// Have to convert the new timestamp by the current unit.
Some(ts) => ts.convert_to(self.unit),
None => None,
}
}
/// Subtracts current timestamp with another timestamp, yielding a duration.
pub fn sub(&self, rhs: &Self) -> Option<chrono::Duration> {
let lhs = self.to_chrono_datetime()?;
@@ -543,6 +616,19 @@ mod tests {
Timestamp::new(value, unit)
}
#[test]
fn test_add_sub_interval() {
let ts = Timestamp::new(1000, TimeUnit::Millisecond);
let interval = Interval::from_day_time(1, 200);
let new_ts = ts.add_interval(interval).unwrap();
assert_eq!(new_ts.unit(), TimeUnit::Millisecond);
assert_eq!(new_ts.value(), 1000 + 3600 * 24 * 1000 + 200);
assert_eq!(ts, new_ts.sub_interval(interval).unwrap());
}
#[test]
fn test_timestamp_reflexivity() {
for _ in 0..1000 {
@@ -1006,6 +1092,33 @@ mod tests {
assert_eq!(TimeUnit::Second, res.unit);
}
#[test]
fn test_timestamp_add() {
let res = Timestamp::new(1, TimeUnit::Second)
.add_duration(Duration::from_secs(1))
.unwrap();
assert_eq!(2, res.value);
assert_eq!(TimeUnit::Second, res.unit);
let res = Timestamp::new(0, TimeUnit::Second)
.add_duration(Duration::from_secs(1))
.unwrap();
assert_eq!(1, res.value);
assert_eq!(TimeUnit::Second, res.unit);
let res = Timestamp::new(1, TimeUnit::Second)
.add_duration(Duration::from_millis(1))
.unwrap();
assert_eq!(1, res.value);
assert_eq!(TimeUnit::Second, res.unit);
let res = Timestamp::new(100, TimeUnit::Second)
.add_duration(Duration::from_millis(1000))
.unwrap();
assert_eq!(101, res.value);
assert_eq!(TimeUnit::Second, res.unit);
}
#[test]
fn test_parse_in_time_zone() {
std::env::set_var("TZ", "Asia/Shanghai");

View File

@@ -48,6 +48,18 @@ pub enum ObjectStoreConfig {
Gcs(GcsConfig),
}
impl ObjectStoreConfig {
pub fn name(&self) -> &'static str {
match self {
Self::File(_) => "File",
Self::S3(_) => "S3",
Self::Oss(_) => "Oss",
Self::Azblob(_) => "Azblob",
Self::Gcs(_) => "Gcs",
}
}
}
/// Storage engine config
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
@@ -63,6 +75,7 @@ pub struct StorageConfig {
pub data_home: String,
#[serde(flatten)]
pub store: ObjectStoreConfig,
pub providers: Vec<ObjectStoreConfig>,
}
impl Default for StorageConfig {
@@ -71,6 +84,7 @@ impl Default for StorageConfig {
global_ttl: None,
data_home: DEFAULT_DATA_HOME.to_string(),
store: ObjectStoreConfig::default(),
providers: vec![],
}
}
}
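
Since `store` is marked `#[serde(flatten)]` while `providers` is a plain list field, the TOML fixtures in the tests above deserialize directly into this struct. A hedged sketch of that mapping (field names taken only from this diff; `toml::from_str` assumed available as in the tests):

// `type = "File"` at the top level is flattened into `store`;
// each [[providers]] entry becomes one element of `providers`.
let config: StorageConfig = toml::from_str(
    r#"
    data_home = "/tmp/greptimedb/"
    type = "File"

    [[providers]]
    type = "Gcs"
    bucket = "foo"
    endpoint = "bar"
    "#,
)
.unwrap();
assert!(matches!(config.store, ObjectStoreConfig::File(_)));
assert_eq!(config.providers.len(), 1);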
@@ -295,7 +309,7 @@ mod tests {
secret_access_key = "secret_access_key"
"#;
let opts: DatanodeOptions = toml::from_str(toml_str).unwrap();
match opts.storage.store {
match &opts.storage.store {
ObjectStoreConfig::S3(cfg) => {
assert_eq!(
"Secret([REDACTED alloc::string::String])".to_string(),

View File

@@ -354,7 +354,6 @@ impl DatanodeBuilder {
));
}
}
info!("going to open {} regions", regions.len());
let semaphore = Arc::new(tokio::sync::Semaphore::new(OPEN_REGION_PARALLELISM));
let mut tasks = vec![];
@@ -417,7 +416,6 @@ impl DatanodeBuilder {
);
let table_provider_factory = Arc::new(DummyTableProviderFactory);
let mut region_server = RegionServer::with_table_provider(
query_engine,
runtime,
@@ -425,13 +423,8 @@ impl DatanodeBuilder {
table_provider_factory,
);
let object_store = store::new_object_store(opts).await?;
let object_store_manager = ObjectStoreManager::new(
"default", // TODO: use a name which is set in the configuration when #919 is done.
object_store,
);
let engines =
Self::build_store_engines(opts, log_store, Arc::new(object_store_manager)).await?;
let object_store_manager = Self::build_object_store_manager(opts).await?;
let engines = Self::build_store_engines(opts, log_store, object_store_manager).await?;
for engine in engines {
region_server.register_engine(engine);
}
@@ -496,6 +489,21 @@ impl DatanodeBuilder {
}
Ok(engines)
}
/// Builds [ObjectStoreManager]
async fn build_object_store_manager(opts: &DatanodeOptions) -> Result<ObjectStoreManagerRef> {
let object_store =
store::new_object_store(opts.storage.store.clone(), &opts.storage.data_home).await?;
let default_name = opts.storage.store.name();
let mut object_store_manager = ObjectStoreManager::new(default_name, object_store);
for store in &opts.storage.providers {
object_store_manager.add(
store.name(),
store::new_object_store(store.clone(), &opts.storage.data_home).await?,
);
}
Ok(Arc::new(object_store_manager))
}
}
#[cfg(test)]

View File

@@ -50,7 +50,7 @@ use session::context::{QueryContextBuilder, QueryContextRef};
use snafu::{OptionExt, ResultExt};
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{RegionEngineRef, RegionRole, SetReadonlyResponse};
use store_api::region_request::{RegionCloseRequest, RegionRequest};
use store_api::region_request::{AffectedRows, RegionCloseRequest, RegionRequest};
use store_api::storage::{RegionId, ScanRequest};
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use table::table::scan::StreamScanAdapter;
@@ -112,7 +112,7 @@ impl RegionServer {
&self,
region_id: RegionId,
request: RegionRequest,
) -> Result<Output> {
) -> Result<AffectedRows> {
self.inner.handle_request(region_id, request).await
}
@@ -209,13 +209,7 @@ impl RegionServerHandler for RegionServer {
// only insert/delete will have multiple results.
let mut affected_rows = 0;
for result in results {
match result {
Output::AffectedRows(rows) => affected_rows += rows,
Output::Stream(_) | Output::RecordBatches(_) => {
// TODO: change the output type to only contains `affected_rows`
unreachable!()
}
}
affected_rows += result;
}
Ok(RegionResponse {
@@ -294,7 +288,7 @@ impl RegionServerInner {
&self,
region_id: RegionId,
request: RegionRequest,
) -> Result<Output> {
) -> Result<AffectedRows> {
let request_type = request.request_type();
let _timer = crate::metrics::HANDLE_REGION_REQUEST_ELAPSED
.with_label_values(&[request_type])

View File

@@ -32,12 +32,15 @@ use object_store::util::normalize_dir;
use object_store::{util, HttpClient, ObjectStore, ObjectStoreBuilder};
use snafu::prelude::*;
use crate::config::{DatanodeOptions, ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
use crate::config::{ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
use crate::error::{self, Result};
pub(crate) async fn new_object_store(opts: &DatanodeOptions) -> Result<ObjectStore> {
let data_home = normalize_dir(&opts.storage.data_home);
let object_store = match &opts.storage.store {
pub(crate) async fn new_object_store(
store: ObjectStoreConfig,
data_home: &str,
) -> Result<ObjectStore> {
let data_home = normalize_dir(data_home);
let object_store = match &store {
ObjectStoreConfig::File(file_config) => {
fs::new_fs_object_store(&data_home, file_config).await
}
@@ -50,9 +53,8 @@ pub(crate) async fn new_object_store(opts: &DatanodeOptions) -> Result<ObjectSto
}?;
// Enable retry layer and cache layer for non-fs object storages
let object_store = if !matches!(opts.storage.store, ObjectStoreConfig::File(..)) {
let object_store =
create_object_store_with_cache(object_store, &opts.storage.store).await?;
let object_store = if !matches!(store, ObjectStoreConfig::File(..)) {
let object_store = create_object_store_with_cache(object_store, &store).await?;
object_store.layer(RetryLayer::new().with_jitter())
} else {
object_store

View File

@@ -31,7 +31,7 @@ use query::QueryEngine;
use session::context::QueryContextRef;
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{RegionEngine, RegionRole, SetReadonlyResponse};
use store_api::region_request::RegionRequest;
use store_api::region_request::{AffectedRows, RegionRequest};
use store_api::storage::{RegionId, ScanRequest};
use table::TableRef;
use tokio::sync::mpsc::{Receiver, Sender};
@@ -109,10 +109,9 @@ impl RegionEngine for MockRegionEngine {
&self,
region_id: RegionId,
request: RegionRequest,
) -> Result<Output, BoxedError> {
) -> Result<AffectedRows, BoxedError> {
let _ = self.sender.send((region_id, request)).await;
Ok(Output::AffectedRows(0))
Ok(0)
}
async fn handle_query(

View File

@@ -210,6 +210,14 @@ impl Value {
}
}
/// Cast Value to Interval. Return None if value is not a valid interval data type.
pub fn as_interval(&self) -> Option<Interval> {
match self {
Value::Interval(i) => Some(*i),
_ => None,
}
}
/// Cast Value to Date. Return None if value is not a valid date data type.
pub fn as_date(&self) -> Option<Date> {
match self {
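
The new accessor mirrors the existing `as_date`/`as_timestamp` helpers. A minimal usage sketch (constructors taken from the interval diff above; `Value::Null` as used in the function tests):

// A non-interval value yields None rather than panicking.
let v = Value::Interval(Interval::from_year_month(1));
assert!(v.as_interval().is_some());
assert!(Value::Null.as_interval().is_none());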

View File

@@ -18,7 +18,6 @@ use std::sync::{Arc, RwLock};
use async_trait::async_trait;
use common_catalog::consts::FILE_ENGINE;
use common_error::ext::BoxedError;
use common_query::Output;
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::{error, info};
use object_store::ObjectStore;
@@ -26,7 +25,8 @@ use snafu::{ensure, OptionExt};
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{RegionEngine, RegionRole, SetReadonlyResponse};
use store_api::region_request::{
RegionCloseRequest, RegionCreateRequest, RegionDropRequest, RegionOpenRequest, RegionRequest,
AffectedRows, RegionCloseRequest, RegionCreateRequest, RegionDropRequest, RegionOpenRequest,
RegionRequest,
};
use store_api::storage::{RegionId, ScanRequest};
use tokio::sync::Mutex;
@@ -59,7 +59,7 @@ impl RegionEngine for FileRegionEngine {
&self,
region_id: RegionId,
request: RegionRequest,
) -> Result<Output, BoxedError> {
) -> Result<AffectedRows, BoxedError> {
self.inner
.handle_request(region_id, request)
.await
@@ -149,7 +149,7 @@ impl EngineInner {
&self,
region_id: RegionId,
request: RegionRequest,
) -> EngineResult<Output> {
) -> EngineResult<AffectedRows> {
match request {
RegionRequest::Create(req) => self.handle_create(region_id, req).await,
RegionRequest::Drop(req) => self.handle_drop(region_id, req).await,
@@ -187,7 +187,7 @@ impl EngineInner {
&self,
region_id: RegionId,
request: RegionCreateRequest,
) -> EngineResult<Output> {
) -> EngineResult<AffectedRows> {
ensure!(
request.engine == FILE_ENGINE,
UnexpectedEngineSnafu {
@@ -196,7 +196,7 @@ impl EngineInner {
);
if self.exists(region_id).await {
return Ok(Output::AffectedRows(0));
return Ok(0);
}
info!("Try to create region, region_id: {}", region_id);
@@ -204,7 +204,7 @@ impl EngineInner {
let _lock = self.region_mutex.lock().await;
// Check again after acquiring the lock
if self.exists(region_id).await {
return Ok(Output::AffectedRows(0));
return Ok(0);
}
let res = FileRegion::create(region_id, request, &self.object_store).await;
@@ -217,16 +217,16 @@ impl EngineInner {
self.regions.write().unwrap().insert(region_id, region);
info!("A new region is created, region_id: {}", region_id);
Ok(Output::AffectedRows(0))
Ok(0)
}
async fn handle_open(
&self,
region_id: RegionId,
request: RegionOpenRequest,
) -> EngineResult<Output> {
) -> EngineResult<AffectedRows> {
if self.exists(region_id).await {
return Ok(Output::AffectedRows(0));
return Ok(0);
}
info!("Try to open region, region_id: {}", region_id);
@@ -234,7 +234,7 @@ impl EngineInner {
let _lock = self.region_mutex.lock().await;
// Check again after acquiring the lock
if self.exists(region_id).await {
return Ok(Output::AffectedRows(0));
return Ok(0);
}
let res = FileRegion::open(region_id, request, &self.object_store).await;
@@ -247,14 +247,14 @@ impl EngineInner {
self.regions.write().unwrap().insert(region_id, region);
info!("Region opened, region_id: {}", region_id);
Ok(Output::AffectedRows(0))
Ok(0)
}
async fn handle_close(
&self,
region_id: RegionId,
_request: RegionCloseRequest,
) -> EngineResult<Output> {
) -> EngineResult<AffectedRows> {
let _lock = self.region_mutex.lock().await;
let mut regions = self.regions.write().unwrap();
@@ -262,14 +262,14 @@ impl EngineInner {
info!("Region closed, region_id: {}", region_id);
}
Ok(Output::AffectedRows(0))
Ok(0)
}
async fn handle_drop(
&self,
region_id: RegionId,
_request: RegionDropRequest,
) -> EngineResult<Output> {
) -> EngineResult<AffectedRows> {
if !self.exists(region_id).await {
return RegionNotFoundSnafu { region_id }.fail();
}
@@ -291,7 +291,7 @@ impl EngineInner {
let _ = self.regions.write().unwrap().remove(&region_id);
info!("Region dropped, region_id: {}", region_id);
Ok(Output::AffectedRows(0))
Ok(0)
}
async fn get_region(&self, region_id: RegionId) -> Option<FileRegionRef> {

View File

@@ -16,37 +16,45 @@ use lazy_static::lazy_static;
use prometheus::*;
lazy_static! {
pub static ref METRIC_META_KV_REQUEST: HistogramVec = register_histogram_vec!(
"meta_kv_request",
/// Elapsed time of responding to kv requests.
pub static ref METRIC_META_KV_REQUEST_ELAPSED: HistogramVec = register_histogram_vec!(
"meta_kv_request_elapsed",
"meta kv request",
&["target", "op", "cluster_id"]
)
.unwrap();
/// The heartbeat connection gauge.
pub static ref METRIC_META_HEARTBEAT_CONNECTION_NUM: IntGauge = register_int_gauge!(
"meta_heartbeat_connection_num",
"meta heartbeat connection num"
)
.unwrap();
/// Elapsed time of heartbeat handler execution.
pub static ref METRIC_META_HANDLER_EXECUTE: HistogramVec =
register_histogram_vec!("meta_handler_execute", "meta handler execute", &["name"]).unwrap();
/// Inactive region gauge.
pub static ref METRIC_META_INACTIVE_REGIONS: IntGauge =
register_int_gauge!("meta_inactive_regions", "meta inactive regions").unwrap();
pub static ref METRIC_META_LEADER_CACHED_KV_LOAD: HistogramVec =
/// Elapsed time to load the leader's cached kv.
pub static ref METRIC_META_LEADER_CACHED_KV_LOAD_ELAPSED: HistogramVec =
register_histogram_vec!("meta_leader_cache_kv_load", "meta load cache", &["prefix"])
.unwrap();
pub static ref METRIC_META_LOAD_FOLLOWER_METADATA: Histogram = register_histogram!(
"meta_load_follower_metadata",
/// Elapsed time to load follower region metadata.
pub static ref METRIC_META_LOAD_FOLLOWER_METADATA_ELAPSED: Histogram = register_histogram!(
"meta_load_follower_metadata_elapsed",
"meta load follower regions metadata elapsed"
)
.unwrap();
pub static ref METRIC_META_LOAD_LEADER_METADATA: Histogram = register_histogram!(
"meta_load_leader_metadata",
/// Elapsed time to load leader region metadata.
pub static ref METRIC_META_LOAD_LEADER_METADATA_ELAPSED: Histogram = register_histogram!(
"meta_load_leader_metadata_elapsed",
"meta load leader regions metadata elapsed"
)
.unwrap();
pub static ref METRIC_META_KV_CACHE_BATCH_GET_HIT_RATE: Gauge = register_gauge!(
"meta_kv_cache_batch_get_hit_rate",
"meta kv cache batch get hit rate"
)
.unwrap();
/// Meta kv cache hit counter.
pub static ref METRIC_META_KV_CACHE_HIT: IntCounterVec =
register_int_counter_vec!("meta_kv_cache_hit", "meta kv cache hit", &["op"]).unwrap();
/// Meta kv cache miss counter.
pub static ref METRIC_META_KV_CACHE_MISS: IntCounterVec =
register_int_counter_vec!("meta_kv_cache_miss", "meta kv cache miss", &["op"]).unwrap();
}

View File

@@ -53,7 +53,7 @@ use crate::service::mailbox::{BroadcastChannel, MailboxRef};
/// It will only be updated/stored after the Red node has succeeded.
///
/// **Note: Storing too much data in the context might incur replication overhead.**
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct PersistentContext {
/// The Id of the cluster.
cluster_id: ClusterId,
@@ -263,14 +263,9 @@ impl Context {
#[async_trait::async_trait]
#[typetag::serde(tag = "region_migration_state")]
trait State: Sync + Send + Debug {
/// Yields the next state.
async fn next(&mut self, ctx: &mut Context) -> Result<Box<dyn State>>;
/// Indicates the procedure execution status of the `State`.
fn status(&self) -> Status {
Status::Executing { persist: true }
}
pub(crate) trait State: Sync + Send + Debug {
/// Yields the next [State] and [Status].
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)>;
/// Returns as [Any](std::any::Any).
fn as_any(&self) -> &dyn Any;
@@ -340,14 +335,16 @@ impl Procedure for RegionMigrationProcedure {
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
let state = &mut self.state;
*state = state.next(&mut self.context).await.map_err(|e| {
let (next, status) = state.next(&mut self.context).await.map_err(|e| {
if matches!(e, Error::RetryLater { .. }) {
ProcedureError::retry_later(e)
} else {
ProcedureError::external(e)
}
})?;
Ok(state.status())
*state = next;
Ok(status)
}
fn dump(&self) -> ProcedureResult<String> {
@@ -367,20 +364,21 @@ impl Procedure for RegionMigrationProcedure {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::sync::Arc;
use common_meta::distributed_time_constants::REGION_LEASE_SECS;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::rpc::router::{Region, RegionRoute};
use super::migration_end::RegionMigrationEnd;
use super::update_metadata::UpdateMetadata;
use super::*;
use crate::handler::HeartbeatMailbox;
use crate::procedure::region_migration::test_util::TestingEnv;
use crate::procedure::region_migration::test_util::*;
use crate::service::mailbox::Channel;
fn new_persistent_context() -> PersistentContext {
PersistentContext {
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
region_id: RegionId::new(1024, 1),
cluster_id: 0,
}
test_util::new_persistent_context(1, 2, RegionId::new(1024, 1))
}
#[test]
@@ -414,20 +412,30 @@ mod tests {
assert_eq!(expected, serialized);
}
#[test]
fn test_backward_compatibility() {
let persistent_ctx = test_util::new_persistent_context(1, 2, RegionId::new(1024, 1));
// NOTE: Changing this will break backward compatibility.
let serialized = r#"{"cluster_id":0,"from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105}"#;
let deserialized: PersistentContext = serde_json::from_str(serialized).unwrap();
assert_eq!(persistent_ctx, deserialized);
}
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct MockState;
#[async_trait::async_trait]
#[typetag::serde]
impl State for MockState {
async fn next(&mut self, ctx: &mut Context) -> Result<Box<dyn State>> {
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
let pc = &mut ctx.persistent_ctx;
if pc.cluster_id == 2 {
Ok(Box::new(RegionMigrationEnd))
Ok((Box::new(RegionMigrationEnd), Status::Done))
} else {
pc.cluster_id += 1;
Ok(Box::new(MockState))
Ok((Box::new(MockState), Status::executing(false)))
}
}
@@ -497,4 +505,145 @@ mod tests {
let instruction = HeartbeatMailbox::json_instruction(&msg).unwrap();
assert_matches!(instruction, Instruction::InvalidateTableIdCache(1024));
}
fn procedure_flow_steps(from_peer_id: u64, to_peer_id: u64) -> Vec<Step> {
vec![
// MigrationStart
Step::next(
"Should be the update metadata for downgrading",
None,
Assertion::simple(assert_update_metadata_downgrade, assert_need_persist),
),
// UpdateMetadata::Downgrade
Step::next(
"Should be the downgrade leader region",
None,
Assertion::simple(assert_downgrade_leader_region, assert_no_persist),
),
// Downgrade Candidate
Step::next(
"Should be the upgrade candidate region",
Some(mock_datanode_reply(
from_peer_id,
Arc::new(|id| Ok(new_downgrade_region_reply(id, None, true, None))),
)),
Assertion::simple(assert_upgrade_candidate_region, assert_no_persist),
),
// Upgrade Candidate
Step::next(
"Should be the update metadata for upgrading",
Some(mock_datanode_reply(
to_peer_id,
Arc::new(|id| Ok(new_upgrade_region_reply(id, true, true, None))),
)),
Assertion::simple(assert_update_metadata_upgrade, assert_no_persist),
),
// UpdateMetadata::Upgrade
Step::next(
"Should be the region migration end",
None,
Assertion::simple(assert_region_migration_end, assert_done),
),
// RegionMigrationEnd
Step::next(
"Should be the region migration end again",
None,
Assertion::simple(assert_region_migration_end, assert_done),
),
]
}
#[tokio::test]
async fn test_procedure_flow() {
common_telemetry::init_default_ut_logging();
let persistent_context = test_util::new_persistent_context(1, 2, RegionId::new(1024, 1));
let state = Box::new(RegionMigrationStart);
// The table metadata.
let from_peer_id = persistent_context.from_peer.id;
let to_peer_id = persistent_context.to_peer.id;
let from_peer = persistent_context.from_peer.clone();
let to_peer = persistent_context.to_peer.clone();
let region_id = persistent_context.region_id;
let table_info = new_test_table_info(1024, vec![1]).into();
let region_routes = vec![RegionRoute {
region: Region::new_test(region_id),
leader_peer: Some(from_peer),
follower_peers: vec![to_peer],
..Default::default()
}];
let suite = ProcedureMigrationTestSuite::new(persistent_context, state);
suite.init_table_metadata(table_info, region_routes).await;
let steps = procedure_flow_steps(from_peer_id, to_peer_id);
let timer = Instant::now();
// Run the table tests.
let runner = ProcedureMigrationSuiteRunner::new(suite)
.steps(steps)
.run_once()
.await;
// Ensure it didn't run into the slow path.
assert!(timer.elapsed().as_secs() < REGION_LEASE_SECS / 2);
runner.suite.verify_table_metadata().await;
}
#[tokio::test]
async fn test_procedure_flow_idempotent() {
common_telemetry::init_default_ut_logging();
let persistent_context = test_util::new_persistent_context(1, 2, RegionId::new(1024, 1));
let state = Box::new(RegionMigrationStart);
// The table metadata.
let from_peer_id = persistent_context.from_peer.id;
let to_peer_id = persistent_context.to_peer.id;
let from_peer = persistent_context.from_peer.clone();
let to_peer = persistent_context.to_peer.clone();
let region_id = persistent_context.region_id;
let table_info = new_test_table_info(1024, vec![1]).into();
let region_routes = vec![RegionRoute {
region: Region::new_test(region_id),
leader_peer: Some(from_peer),
follower_peers: vec![to_peer],
..Default::default()
}];
let suite = ProcedureMigrationTestSuite::new(persistent_context, state);
suite.init_table_metadata(table_info, region_routes).await;
let steps = procedure_flow_steps(from_peer_id, to_peer_id);
let setup_to_latest_persisted_state = Step::setup(
"Sets state to UpdateMetadata::Downgrade",
merge_before_test_fn(vec![
setup_state(Arc::new(|| Box::new(UpdateMetadata::Downgrade))),
Arc::new(reset_volatile_ctx),
]),
);
let steps = [
steps.clone(),
vec![setup_to_latest_persisted_state.clone()],
steps.clone()[1..].to_vec(),
vec![setup_to_latest_persisted_state],
steps.clone()[1..].to_vec(),
]
.concat();
let timer = Instant::now();
// Run the table tests.
let runner = ProcedureMigrationSuiteRunner::new(suite)
.steps(steps.clone())
.run_once()
.await;
// Ensure it didn't run into the slow path.
assert!(timer.elapsed().as_secs() < REGION_LEASE_SECS / 2);
runner.suite.verify_table_metadata().await;
}
}

View File

@@ -20,7 +20,8 @@ use common_meta::distributed_time_constants::{MAILBOX_RTT_SECS, REGION_LEASE_SECS};
use common_meta::instruction::{
DowngradeRegion, DowngradeRegionReply, Instruction, InstructionReply,
};
use common_telemetry::warn;
use common_procedure::Status;
use common_telemetry::{info, warn};
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use tokio::time::sleep;
@@ -53,18 +54,24 @@ impl Default for DowngradeLeaderRegion {
#[async_trait::async_trait]
#[typetag::serde]
impl State for DowngradeLeaderRegion {
async fn next(&mut self, ctx: &mut Context) -> Result<Box<dyn State>> {
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
// Ensures the `leader_region_lease_deadline` exists after recovering.
ctx.volatile_ctx
.set_leader_region_lease_deadline(Duration::from_secs(REGION_LEASE_SECS));
self.downgrade_region_with_retry(ctx).await;
// Safety: must exist.
if let Some(deadline) = ctx.volatile_ctx.leader_region_lease_deadline.as_ref() {
info!(
"Running into the downgrade leader slow path, sleep until {:?}",
deadline
);
tokio::time::sleep_until(*deadline).await;
}
Ok(Box::<UpgradeCandidateRegion>::default())
Ok((
Box::<UpgradeCandidateRegion>::default(),
Status::executing(false),
))
}
fn as_any(&self) -> &dyn Any {
@@ -202,16 +209,14 @@ impl DowngradeLeaderRegion {
mod tests {
use std::assert_matches::assert_matches;
use api::v1::meta::mailbox_message::Payload;
use common_meta::peer::Peer;
use common_time::util::current_time_millis;
use store_api::storage::RegionId;
use tokio::time::Instant;
use super::*;
use crate::error::Error;
use crate::procedure::region_migration::test_util::{
new_close_region_reply, send_mock_reply, TestingEnv,
new_close_region_reply, new_downgrade_region_reply, send_mock_reply, TestingEnv,
};
use crate::procedure::region_migration::{ContextFactory, PersistentContext};
@@ -224,29 +229,6 @@ mod tests {
}
}
fn new_downgrade_region_reply(
id: u64,
last_entry_id: Option<u64>,
exist: bool,
error: Option<String>,
) -> MailboxMessage {
MailboxMessage {
id,
subject: "mock".to_string(),
from: "datanode".to_string(),
to: "meta".to_string(),
timestamp_millis: current_time_millis(),
payload: Some(Payload::Json(
serde_json::to_string(&InstructionReply::DowngradeRegion(DowngradeRegionReply {
last_entry_id,
exists: exist,
error,
}))
.unwrap(),
)),
}
}
#[tokio::test]
async fn test_datanode_is_unreachable() {
let state = DowngradeLeaderRegion::default();
@@ -504,7 +486,7 @@ mod tests {
});
let timer = Instant::now();
let next = state.next(&mut ctx).await.unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
let elapsed = timer.elapsed().as_secs();
assert!(elapsed < REGION_LEASE_SECS / 2);
assert_eq!(ctx.volatile_ctx.leader_region_last_entry_id, Some(1));
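Note: the slow path above waits on an absolute deadline via tokio. A minimal standalone sketch of that pattern, with a stand-in lease constant (not the crate's real value):

    use std::time::Duration;
    use tokio::time::Instant;

    const REGION_LEASE_SECS: u64 = 20; // stand-in value for illustration

    #[tokio::main]
    async fn main() {
        let deadline = Instant::now() + Duration::from_secs(REGION_LEASE_SECS);
        // sleep_until takes an absolute Instant and returns immediately
        // if the deadline has already passed.
        tokio::time::sleep_until(deadline).await;
        println!("lease deadline reached");
    }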

View File

@@ -37,17 +37,13 @@ impl RegionMigrationAbort {
#[async_trait::async_trait]
#[typetag::serde]
impl State for RegionMigrationAbort {
async fn next(&mut self, _: &mut Context) -> Result<Box<dyn State>> {
async fn next(&mut self, _: &mut Context) -> Result<(Box<dyn State>, Status)> {
error::MigrationAbortSnafu {
reason: &self.reason,
}
.fail()
}
fn status(&self) -> Status {
Status::Done
}
fn as_any(&self) -> &dyn Any {
self
}
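Across these files the per-state `fn status()` is folded into the return value of `next`. A sketch of the resulting trait shape, reconstructed from the diffs and assuming the async-trait crate; the stand-in `Context`, `Status`, and error types are simplifications, not the real definitions:

    use std::any::Any;

    struct Context; // stand-in for the migration context
    enum Status { Executing { persist: bool }, Done }
    type Result<T> = std::result::Result<T, String>;

    #[async_trait::async_trait]
    trait State: Send + Sync {
        // Each transition now yields the next state together with the
        // procedure Status, instead of exposing a separate status() method.
        async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)>;
        fn as_any(&self) -> &dyn Any;
    }

    struct End;

    #[async_trait::async_trait]
    impl State for End {
        async fn next(&mut self, _ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
            // Terminal states report Done alongside themselves.
            Ok((Box::new(End), Status::Done))
        }
        fn as_any(&self) -> &dyn Any { self }
    }

    fn main() {}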

View File

@@ -26,12 +26,8 @@ pub struct RegionMigrationEnd;
#[async_trait::async_trait]
#[typetag::serde]
impl State for RegionMigrationEnd {
async fn next(&mut self, _: &mut Context) -> Result<Box<dyn State>> {
Ok(Box::new(RegionMigrationEnd))
}
fn status(&self) -> Status {
Status::Done
async fn next(&mut self, _: &mut Context) -> Result<(Box<dyn State>, Status)> {
Ok((Box::new(RegionMigrationEnd), Status::Done))
}
fn as_any(&self) -> &dyn Any {

View File

@@ -16,16 +16,24 @@ use std::any::Any;
use common_meta::peer::Peer;
use common_meta::rpc::router::RegionRoute;
use common_procedure::Status;
use serde::{Deserialize, Serialize};
use snafu::OptionExt;
use store_api::storage::RegionId;
use super::downgrade_leader_region::DowngradeLeaderRegion;
use super::migration_end::RegionMigrationEnd;
use super::open_candidate_region::OpenCandidateRegion;
use super::update_metadata::UpdateMetadata;
use crate::error::{self, Result};
use crate::procedure::region_migration::{Context, State};
/// The behaviors:
///
/// If the expected leader region has been opened on `to_peer`, go to the [RegionMigrationEnd] state.
///
/// If the candidate region has been opened on `to_peer`, go to the [UpdateMetadata::Downgrade] state.
///
/// Otherwise go to the [OpenCandidateRegion] state.
#[derive(Debug, Serialize, Deserialize)]
pub struct RegionMigrationStart;
@@ -34,22 +42,22 @@ pub struct RegionMigrationStart;
impl State for RegionMigrationStart {
/// Yields next [State].
///
/// If the expected leader region has been opened on `to_peer`, go to the MigrationEnd state.
/// If the expected leader region has been opened on `to_peer`, go to the [RegionMigrationEnd] state.
///
/// If the candidate region has been opened on `to_peer`, go to the DowngradeLeader state.
/// If the candidate region has been opened on `to_peer`, go to the [UpdateMetadata::Downgrade] state.
///
/// Otherwise go to the OpenCandidateRegion state.
async fn next(&mut self, ctx: &mut Context) -> Result<Box<dyn State>> {
/// Otherwise go to the [OpenCandidateRegion] state.
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
let region_id = ctx.persistent_ctx.region_id;
let region_route = self.retrieve_region_route(ctx, region_id).await?;
let to_peer = &ctx.persistent_ctx.to_peer;
if self.check_leader_region_on_peer(&region_route, to_peer)? {
Ok(Box::new(RegionMigrationEnd))
Ok((Box::new(RegionMigrationEnd), Status::Done))
} else if self.check_candidate_region_on_peer(&region_route, to_peer) {
Ok(Box::<DowngradeLeaderRegion>::default())
Ok((Box::new(UpdateMetadata::Downgrade), Status::executing(true)))
} else {
Ok(Box::new(OpenCandidateRegion))
Ok((Box::new(OpenCandidateRegion), Status::executing(true)))
}
}
@@ -138,6 +146,7 @@ mod tests {
use super::*;
use crate::error::Error;
use crate::procedure::region_migration::test_util::{self, TestingEnv};
use crate::procedure::region_migration::update_metadata::UpdateMetadata;
use crate::procedure::region_migration::{ContextFactory, PersistentContext};
fn new_persistent_context() -> PersistentContext {
@@ -216,12 +225,11 @@ mod tests {
.await
.unwrap();
let next = state.next(&mut ctx).await.unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
let _ = next
.as_any()
.downcast_ref::<DowngradeLeaderRegion>()
.unwrap();
let update_metadata = next.as_any().downcast_ref::<UpdateMetadata>().unwrap();
assert_matches!(update_metadata, UpdateMetadata::Downgrade);
}
#[tokio::test]
@@ -250,7 +258,7 @@ mod tests {
.await
.unwrap();
let next = state.next(&mut ctx).await.unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
let _ = next.as_any().downcast_ref::<RegionMigrationEnd>().unwrap();
}
@@ -277,7 +285,7 @@ mod tests {
.await
.unwrap();
let next = state.next(&mut ctx).await.unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
let _ = next.as_any().downcast_ref::<OpenCandidateRegion>().unwrap();
}
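These tests all rely on the same `as_any` downcast idiom to check which concrete state came back from `next`. A self-contained sketch of it:

    use std::any::Any;

    struct RegionMigrationEnd;

    fn assert_is_end(next: &dyn Any) {
        // Panics unless the trait object is the expected concrete state.
        next.downcast_ref::<RegionMigrationEnd>()
            .expect("should be RegionMigrationEnd");
    }

    fn main() {
        assert_is_end(&RegionMigrationEnd);
    }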

View File

@@ -21,6 +21,7 @@ use common_meta::ddl::utils::region_storage_path;
use common_meta::distributed_time_constants::MAILBOX_RTT_SECS;
use common_meta::instruction::{Instruction, InstructionReply, OpenRegion, SimpleReply};
use common_meta::RegionIdent;
use common_procedure::Status;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
@@ -38,11 +39,14 @@ pub struct OpenCandidateRegion;
#[async_trait::async_trait]
#[typetag::serde]
impl State for OpenCandidateRegion {
async fn next(&mut self, ctx: &mut Context) -> Result<Box<dyn State>> {
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
let instruction = self.build_open_region_instruction(ctx).await?;
self.open_candidate_region(ctx, instruction).await?;
Ok(Box::<DowngradeLeaderRegion>::default())
Ok((
Box::<DowngradeLeaderRegion>::default(),
Status::executing(false),
))
}
fn as_any(&self) -> &dyn Any {
@@ -430,7 +434,7 @@ mod tests {
send_mock_reply(mailbox, rx, |id| Ok(new_open_region_reply(id, true, None)));
let next = state.next(&mut ctx).await.unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
let vc = ctx.volatile_ctx;
assert_eq!(
vc.opening_region_guard.unwrap().info(),

View File

@@ -12,24 +12,36 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::assert_matches::assert_matches;
use std::sync::Arc;
use api::v1::meta::mailbox_message::Payload;
use api::v1::meta::{HeartbeatResponse, MailboxMessage, RequestHeader};
use common_meta::instruction::{InstructionReply, SimpleReply};
use common_meta::instruction::{
DowngradeRegionReply, InstructionReply, SimpleReply, UpgradeRegionReply,
};
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::peer::Peer;
use common_meta::rpc::router::RegionRoute;
use common_meta::sequence::Sequence;
use common_procedure::{Context as ProcedureContext, ProcedureId};
use common_meta::DatanodeId;
use common_procedure::{Context as ProcedureContext, ProcedureId, Status};
use common_procedure_test::MockContextProvider;
use common_telemetry::debug;
use common_time::util::current_time_millis;
use futures::future::BoxFuture;
use store_api::storage::RegionId;
use table::metadata::RawTableInfo;
use tokio::sync::mpsc::{Receiver, Sender};
use super::ContextFactoryImpl;
use super::upgrade_candidate_region::UpgradeCandidateRegion;
use super::{Context, ContextFactory, ContextFactoryImpl, State, VolatileContext};
use crate::error::Result;
use crate::handler::{HeartbeatMailbox, Pusher, Pushers};
use crate::procedure::region_migration::downgrade_leader_region::DowngradeLeaderRegion;
use crate::procedure::region_migration::migration_end::RegionMigrationEnd;
use crate::procedure::region_migration::update_metadata::UpdateMetadata;
use crate::procedure::region_migration::PersistentContext;
use crate::region::lease_keeper::{OpeningRegionKeeper, OpeningRegionKeeperRef};
use crate::service::mailbox::{Channel, MailboxRef};
@@ -147,11 +159,59 @@ pub fn new_close_region_reply(id: u64) -> MailboxMessage {
}
}
/// Generates an [InstructionReply::DowngradeRegion] reply.
pub fn new_downgrade_region_reply(
id: u64,
last_entry_id: Option<u64>,
exist: bool,
error: Option<String>,
) -> MailboxMessage {
MailboxMessage {
id,
subject: "mock".to_string(),
from: "datanode".to_string(),
to: "meta".to_string(),
timestamp_millis: current_time_millis(),
payload: Some(Payload::Json(
serde_json::to_string(&InstructionReply::DowngradeRegion(DowngradeRegionReply {
last_entry_id,
exists: exist,
error,
}))
.unwrap(),
)),
}
}
/// Generates an [InstructionReply::UpgradeRegion] reply.
pub fn new_upgrade_region_reply(
id: u64,
ready: bool,
exists: bool,
error: Option<String>,
) -> MailboxMessage {
MailboxMessage {
id,
subject: "mock".to_string(),
from: "datanode".to_string(),
to: "meta".to_string(),
timestamp_millis: current_time_millis(),
payload: Some(Payload::Json(
serde_json::to_string(&InstructionReply::UpgradeRegion(UpgradeRegionReply {
ready,
exists,
error,
}))
.unwrap(),
)),
}
}
/// Sends a mock reply.
pub fn send_mock_reply(
mailbox: MailboxRef,
mut rx: MockHeartbeatReceiver,
msg: impl FnOnce(u64) -> Result<MailboxMessage> + Send + 'static,
msg: impl Fn(u64) -> Result<MailboxMessage> + Send + 'static,
) {
common_runtime::spawn_bg(async move {
let resp = rx.recv().await.unwrap().unwrap();
@@ -169,3 +229,300 @@ pub fn new_persistent_context(from: u64, to: u64, region_id: RegionId) -> Persis
cluster_id: 0,
}
}
/// The test suite for the region migration procedure.
pub(crate) struct ProcedureMigrationTestSuite {
pub(crate) env: TestingEnv,
context: Context,
state: Box<dyn State>,
}
/// The hook is called before the test starts.
pub(crate) type BeforeTest =
Arc<dyn Fn(&mut ProcedureMigrationTestSuite) -> BoxFuture<'_, ()> + Send + Sync>;
/// Custom assertion.
pub(crate) type CustomAssertion = Arc<
dyn Fn(
&mut ProcedureMigrationTestSuite,
Result<(Box<dyn State>, Status)>,
) -> BoxFuture<'_, Result<()>>
+ Send
+ Sync,
>;
/// State assertion function.
pub(crate) type StateAssertion = Arc<dyn Fn(&dyn State) + Send + Sync>;
/// Status assertion function.
pub(crate) type StatusAssertion = Arc<dyn Fn(Status) + Send + Sync>;
// TODO(weny): Remove it.
#[allow(dead_code)]
/// The type of assertion.
#[derive(Clone)]
pub(crate) enum Assertion {
Simple(StateAssertion, StatusAssertion),
Custom(CustomAssertion),
}
impl Assertion {
/// Returns an [Assertion::Simple].
pub(crate) fn simple<
T: Fn(&dyn State) + Send + Sync + 'static,
U: Fn(Status) + Send + Sync + 'static,
>(
state: T,
status: U,
) -> Self {
Self::Simple(Arc::new(state), Arc::new(status))
}
}
impl ProcedureMigrationTestSuite {
/// Returns a [ProcedureMigrationTestSuite].
pub(crate) fn new(persistent_ctx: PersistentContext, start: Box<dyn State>) -> Self {
let env = TestingEnv::new();
let context = env.context_factory().new_context(persistent_ctx);
Self {
env,
context,
state: start,
}
}
/// Invokes `next` on the current [State] and applies the assertion.
pub(crate) async fn next(
&mut self,
name: &str,
before: Option<BeforeTest>,
assertion: Assertion,
) -> Result<()> {
debug!("suite test: {name}");
if let Some(before) = before {
before(self).await;
}
debug!("suite test: {name} invoking next");
let result = self.state.next(&mut self.context).await;
match assertion {
Assertion::Simple(state_assert, status_assert) => {
let (next, status) = result?;
state_assert(&*next);
status_assert(status);
self.state = next;
}
Assertion::Custom(assert_fn) => {
assert_fn(self, result).await?;
}
}
Ok(())
}
/// Initializes table metadata.
pub(crate) async fn init_table_metadata(
&self,
table_info: RawTableInfo,
region_routes: Vec<RegionRoute>,
) {
self.env
.table_metadata_manager()
.create_table_metadata(table_info, region_routes)
.await
.unwrap();
}
/// Verifies table metadata after region migration.
pub(crate) async fn verify_table_metadata(&self) {
let region_id = self.context.persistent_ctx.region_id;
let region_routes = self
.env
.table_metadata_manager
.table_route_manager()
.get(region_id.table_id())
.await
.unwrap()
.unwrap()
.into_inner()
.region_routes;
let expected_leader_id = self.context.persistent_ctx.to_peer.id;
let removed_follower_id = self.context.persistent_ctx.from_peer.id;
let region_route = region_routes
.into_iter()
.find(|route| route.region.id == region_id)
.unwrap();
assert!(!region_route.is_leader_downgraded());
assert_eq!(region_route.leader_peer.unwrap().id, expected_leader_id);
assert!(!region_route
.follower_peers
.into_iter()
.any(|route| route.id == removed_follower_id))
}
}
/// A single step of the test.
#[derive(Clone)]
pub enum Step {
Setup((String, BeforeTest)),
Next((String, Option<BeforeTest>, Assertion)),
}
impl Step {
/// Returns the [Step::Setup].
pub(crate) fn setup(name: &str, before: BeforeTest) -> Self {
Self::Setup((name.to_string(), before))
}
/// Returns the [Step::Next].
pub(crate) fn next(name: &str, before: Option<BeforeTest>, assertion: Assertion) -> Self {
Self::Next((name.to_string(), before, assertion))
}
}
/// The test runner of [ProcedureMigrationTestSuite].
pub(crate) struct ProcedureMigrationSuiteRunner {
pub(crate) suite: ProcedureMigrationTestSuite,
steps: Vec<Step>,
}
impl ProcedureMigrationSuiteRunner {
/// Returns the [ProcedureMigrationSuiteRunner].
pub(crate) fn new(suite: ProcedureMigrationTestSuite) -> Self {
Self {
suite,
steps: vec![],
}
}
/// Sets the [Step]s.
pub(crate) fn steps(self, steps: Vec<Step>) -> Self {
Self {
suite: self.suite,
steps,
}
}
/// Consumes all steps and runs once.
pub(crate) async fn run_once(mut self) -> Self {
for step in self.steps.drain(..) {
match step {
Step::Setup((name, before)) => {
debug!("Running the before hook: {name}");
before(&mut self.suite).await;
}
Step::Next((name, before, assertion)) => {
self.suite.next(&name, before, assertion).await.unwrap();
}
}
}
self
}
}
/// Asserts the [Status] needs to be persisted.
pub(crate) fn assert_need_persist(status: Status) {
assert!(status.need_persist());
}
/// Asserts the [Status] doesn't need to be persisted.
pub(crate) fn assert_no_persist(status: Status) {
assert!(!status.need_persist());
}
/// Asserts the [Status] should be [Status::Done].
pub(crate) fn assert_done(status: Status) {
assert_matches!(status, Status::Done)
}
/// Asserts the [State] should be [UpdateMetadata::Downgrade].
pub(crate) fn assert_update_metadata_downgrade(next: &dyn State) {
let state = next.as_any().downcast_ref::<UpdateMetadata>().unwrap();
assert_matches!(state, UpdateMetadata::Downgrade);
}
/// Asserts the [State] should be [UpdateMetadata::Upgrade].
pub(crate) fn assert_update_metadata_upgrade(next: &dyn State) {
let state = next.as_any().downcast_ref::<UpdateMetadata>().unwrap();
assert_matches!(state, UpdateMetadata::Upgrade);
}
/// Asserts the [State] should be [RegionMigrationEnd].
pub(crate) fn assert_region_migration_end(next: &dyn State) {
let _ = next.as_any().downcast_ref::<RegionMigrationEnd>().unwrap();
}
/// Asserts the [State] should be [DowngradeLeaderRegion].
pub(crate) fn assert_downgrade_leader_region(next: &dyn State) {
let _ = next
.as_any()
.downcast_ref::<DowngradeLeaderRegion>()
.unwrap();
}
/// Asserts the [State] should be [UpgradeCandidateRegion].
pub(crate) fn assert_upgrade_candidate_region(next: &dyn State) {
let _ = next
.as_any()
.downcast_ref::<UpgradeCandidateRegion>()
.unwrap();
}
/// Mocks the reply from the datanode.
pub(crate) fn mock_datanode_reply(
peer_id: DatanodeId,
msg: Arc<dyn Fn(u64) -> Result<MailboxMessage> + Send + Sync>,
) -> BeforeTest {
Arc::new(move |suite| {
let msg_moved = msg.clone();
Box::pin(async move {
let mailbox_ctx = suite.env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
.insert_heartbeat_response_receiver(Channel::Datanode(peer_id), tx)
.await;
send_mock_reply(mailbox, rx, move |id| msg_moved(id));
})
})
}
/// Sets up the [State] of the [ProcedureMigrationTestSuite].
pub(crate) fn setup_state(
state_factory: Arc<dyn Fn() -> Box<dyn State> + Send + Sync>,
) -> BeforeTest {
Arc::new(move |suite| {
let factory_moved = state_factory.clone();
Box::pin(async move {
suite.state = factory_moved();
})
})
}
/// Resets the [VolatileContext] of the [Context] to its default.
pub(crate) fn reset_volatile_ctx(suite: &mut ProcedureMigrationTestSuite) -> BoxFuture<'_, ()> {
Box::pin(async {
suite.context.volatile_ctx = VolatileContext::default();
})
}
/// Merges the batch of [BeforeTest].
pub(crate) fn merge_before_test_fn(hooks: Vec<BeforeTest>) -> BeforeTest {
Arc::new(move |suite| {
let hooks_moved = hooks.clone();
Box::pin(async move {
for hook in hooks_moved {
hook(suite).await;
}
})
})
}
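The `BeforeTest` alias above is the usual Arc'd closure returning a boxed future, so hooks can be cloned, merged, and awaited by the runner. A minimal standalone sketch under that assumption (the `Suite` type here is a placeholder):

    use std::sync::Arc;
    use futures::future::BoxFuture;

    struct Suite { counter: u32 }

    type BeforeTest = Arc<dyn Fn(&mut Suite) -> BoxFuture<'_, ()> + Send + Sync>;

    fn bump() -> BeforeTest {
        Arc::new(|suite: &mut Suite| {
            Box::pin(async move {
                suite.counter += 1;
            })
        })
    }

    #[tokio::main]
    async fn main() {
        let mut suite = Suite { counter: 0 };
        let hook = bump();
        hook(&mut suite).await;
        assert_eq!(suite.counter, 1);
    }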

View File

@@ -18,6 +18,7 @@ pub(crate) mod upgrade_candidate_region;
use std::any::Any;
use common_procedure::Status;
use common_telemetry::warn;
use serde::{Deserialize, Serialize};
@@ -41,12 +42,15 @@ pub enum UpdateMetadata {
#[async_trait::async_trait]
#[typetag::serde]
impl State for UpdateMetadata {
async fn next(&mut self, ctx: &mut Context) -> Result<Box<dyn State>> {
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
match self {
UpdateMetadata::Downgrade => {
self.downgrade_leader_region(ctx).await?;
Ok(Box::<DowngradeLeaderRegion>::default())
Ok((
Box::<DowngradeLeaderRegion>::default(),
Status::executing(false),
))
}
UpdateMetadata::Upgrade => {
self.upgrade_candidate_region(ctx).await?;
@@ -54,7 +58,7 @@ impl State for UpdateMetadata {
if let Err(err) = ctx.invalidate_table_cache().await {
warn!("Failed to broadcast the invalidate table cache message during the upgrade candidate, error: {err:?}");
};
Ok(Box::new(RegionMigrationEnd))
Ok((Box::new(RegionMigrationEnd), Status::Done))
}
UpdateMetadata::Rollback => {
self.rollback_downgraded_region(ctx).await?;
@@ -62,9 +66,12 @@ impl State for UpdateMetadata {
if let Err(err) = ctx.invalidate_table_cache().await {
warn!("Failed to broadcast the invalidate table cache message during the rollback, error: {err:?}");
};
Ok(Box::new(RegionMigrationAbort::new(
"Failed to upgrade the candidate region.",
)))
Ok((
Box::new(RegionMigrationAbort::new(
"Failed to upgrade the candidate region.",
)),
Status::executing(false),
))
}
}
}
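The `Status::executing(bool)` values above carry the persistence hint checked by `assert_need_persist`/`assert_no_persist` in the test utilities. A sketch of that contract reconstructed from those assertions; whether `Done` reports `need_persist` is an assumption here, not something the diff confirms:

    #[derive(Debug)]
    enum Status {
        Executing { persist: bool },
        Done,
    }

    impl Status {
        fn executing(persist: bool) -> Self {
            Status::Executing { persist }
        }

        fn need_persist(&self) -> bool {
            match self {
                Status::Executing { persist } => *persist,
                Status::Done => false, // assumed: a finished procedure is finalized elsewhere
            }
        }
    }

    fn main() {
        assert!(Status::executing(true).need_persist());
        assert!(!Status::executing(false).need_persist());
    }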

View File

@@ -38,13 +38,19 @@ impl UpdateMetadata {
/// - There is no other DDL procedure executed concurrently for the current table.
pub async fn downgrade_leader_region(&self, ctx: &mut Context) -> Result<()> {
let table_metadata_manager = ctx.table_metadata_manager.clone();
let from_peer_id = ctx.persistent_ctx.from_peer.id;
let region_id = ctx.region_id();
let table_id = region_id.table_id();
let current_table_route_value = ctx.get_table_route_value().await?;
if let Err(err) = table_metadata_manager
.update_leader_region_status(table_id, current_table_route_value, |route| {
if route.region.id == region_id {
if route.region.id == region_id
&& route
.leader_peer
.as_ref()
.is_some_and(|leader_peer| leader_peer.id == from_peer_id)
{
Some(Some(RegionStatus::Downgraded))
} else {
None
@@ -167,6 +173,48 @@ mod tests {
assert!(err.to_string().contains("Failed to update the table route"));
}
#[tokio::test]
async fn test_only_downgrade_from_peer() {
let mut state = Box::new(UpdateMetadata::Downgrade);
let persistent_context = new_persistent_context();
let env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
let table_id = ctx.region_id().table_id();
let table_info = new_test_table_info(1024, vec![1, 2]).into();
let region_routes = vec![RegionRoute {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(Peer::empty(1024)),
..Default::default()
}];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes)
.await
.unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
let _ = next
.as_any()
.downcast_ref::<DowngradeLeaderRegion>()
.unwrap();
let latest_table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
.await
.unwrap()
.unwrap();
// It should remain unchanged.
assert_eq!(latest_table_route.version(), 0);
assert!(!latest_table_route.region_routes[0].is_leader_downgraded());
assert!(ctx.volatile_ctx.table_route.is_none());
}
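The new guard above only marks a route Downgraded when its current leader is still the migration source, which is exactly what `test_only_downgrade_from_peer` verifies. The check, reduced to a standalone sketch with simplified types:

    struct Peer { id: u64 }
    struct Route { leader_peer: Option<Peer> }

    fn should_downgrade(route: &Route, from_peer_id: u64) -> bool {
        route
            .leader_peer
            .as_ref()
            .is_some_and(|leader| leader.id == from_peer_id)
    }

    fn main() {
        let route = Route { leader_peer: Some(Peer { id: 1024 }) };
        assert!(should_downgrade(&route, 1024));
        assert!(!should_downgrade(&route, 1)); // leader changed; leave the route alone
    }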
#[tokio::test]
async fn test_next_downgrade_leader_region_state() {
let mut state = Box::new(UpdateMetadata::Downgrade);
@@ -190,7 +238,7 @@ mod tests {
.await
.unwrap();
let next = state.next(&mut ctx).await.unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
let _ = next
.as_any()

View File

@@ -219,7 +219,7 @@ mod tests {
.await
.unwrap();
let next = state.next(&mut ctx).await.unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
let _ = next
.as_any()

View File

@@ -76,6 +76,40 @@ impl UpdateMetadata {
Ok(region_routes)
}
/// Returns true if region metadata has been updated.
async fn check_metadata_updated(&self, ctx: &mut Context) -> Result<bool> {
let region_id = ctx.region_id();
let table_route_value = ctx.get_table_route_value().await?.clone();
let region_routes = table_route_value.region_routes.clone();
let region_route = region_routes
.into_iter()
.find(|route| route.region.id == region_id)
.context(error::RegionRouteNotFoundSnafu { region_id })?;
let leader_peer = region_route
.leader_peer
.as_ref()
.context(error::UnexpectedSnafu {
violated: format!("The leader peer of region {region_id} is not found during the update metadata for upgrading"),
})?;
let candidate_peer_id = ctx.persistent_ctx.to_peer.id;
if leader_peer.id == candidate_peer_id {
ensure!(
!region_route.is_leader_downgraded(),
error::UnexpectedSnafu {
violated: format!("Unexpected intermediate state is found during the update metadata for upgrading region {region_id}"),
}
);
Ok(true)
} else {
Ok(false)
}
}
/// Upgrades the candidate region.
///
/// Abort(non-retry):
@@ -89,6 +123,10 @@ impl UpdateMetadata {
let region_id = ctx.region_id();
let table_metadata_manager = ctx.table_metadata_manager.clone();
if self.check_metadata_updated(ctx).await? {
return Ok(());
}
let region_routes = self.build_upgrade_candidate_region_metadata(ctx).await?;
let table_info_value = ctx.get_table_info_value().await?;
@@ -325,6 +363,85 @@ mod tests {
assert!(err.to_string().contains("Failed to update the table route"));
}
#[tokio::test]
async fn test_check_metadata() {
let state = UpdateMetadata::Upgrade;
let env = TestingEnv::new();
let persistent_context = new_persistent_context();
let leader_peer = persistent_context.from_peer.clone();
let mut ctx = env.context_factory().new_context(persistent_context);
let table_info = new_test_table_info(1024, vec![1]).into();
let region_routes = vec![RegionRoute {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(leader_peer),
follower_peers: vec![Peer::empty(2), Peer::empty(3)],
leader_status: None,
}];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes)
.await
.unwrap();
let updated = state.check_metadata_updated(&mut ctx).await.unwrap();
assert!(!updated);
}
#[tokio::test]
async fn test_check_metadata_updated() {
let state = UpdateMetadata::Upgrade;
let env = TestingEnv::new();
let persistent_context = new_persistent_context();
let candidate_peer = persistent_context.to_peer.clone();
let mut ctx = env.context_factory().new_context(persistent_context);
let table_info = new_test_table_info(1024, vec![1]).into();
let region_routes = vec![RegionRoute {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(candidate_peer),
follower_peers: vec![Peer::empty(2), Peer::empty(3)],
leader_status: None,
}];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes)
.await
.unwrap();
let updated = state.check_metadata_updated(&mut ctx).await.unwrap();
assert!(updated);
}
#[tokio::test]
async fn test_check_metadata_intermediate_state() {
let state = UpdateMetadata::Upgrade;
let env = TestingEnv::new();
let persistent_context = new_persistent_context();
let candidate_peer = persistent_context.to_peer.clone();
let mut ctx = env.context_factory().new_context(persistent_context);
let table_info = new_test_table_info(1024, vec![1]).into();
let region_routes = vec![RegionRoute {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(candidate_peer),
follower_peers: vec![Peer::empty(2), Peer::empty(3)],
leader_status: Some(RegionStatus::Downgraded),
}];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes)
.await
.unwrap();
let err = state.check_metadata_updated(&mut ctx).await.unwrap_err();
assert_matches!(err, Error::Unexpected { .. });
assert!(err.to_string().contains("intermediate state"));
}
#[tokio::test]
async fn test_next_migration_end_state() {
let mut state = Box::new(UpdateMetadata::Upgrade);
@@ -353,7 +470,7 @@ mod tests {
.await
.unwrap();
let next = state.next(&mut ctx).await.unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
let _ = next.as_any().downcast_ref::<RegionMigrationEnd>().unwrap();
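The `check_metadata_updated` early return is what makes the upgrade step safe to replay after a procedure restart. The shape of that guard, reduced to a minimal sketch:

    fn upgrade(leader_id: &mut u64, candidate_id: u64) -> bool {
        if *leader_id == candidate_id {
            // Desired end state already holds (e.g. a retried procedure step).
            return false;
        }
        *leader_id = candidate_id;
        true
    }

    fn main() {
        let mut leader = 1;
        assert!(upgrade(&mut leader, 2));  // first run performs the switch
        assert!(!upgrade(&mut leader, 2)); // replay is a no-op
    }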

View File

@@ -18,6 +18,7 @@ use std::time::Duration;
use api::v1::meta::MailboxMessage;
use common_meta::distributed_time_constants::MAILBOX_RTT_SECS;
use common_meta::instruction::{Instruction, InstructionReply, UpgradeRegion, UpgradeRegionReply};
use common_procedure::Status;
use common_telemetry::warn;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
@@ -56,11 +57,11 @@ impl Default for UpgradeCandidateRegion {
#[async_trait::async_trait]
#[typetag::serde]
impl State for UpgradeCandidateRegion {
async fn next(&mut self, ctx: &mut Context) -> Result<Box<dyn State>> {
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
if self.upgrade_region_with_retry(ctx).await {
Ok(Box::new(UpdateMetadata::Upgrade))
Ok((Box::new(UpdateMetadata::Upgrade), Status::executing(false)))
} else {
Ok(Box::new(UpdateMetadata::Rollback))
Ok((Box::new(UpdateMetadata::Rollback), Status::executing(false)))
}
}
@@ -219,15 +220,13 @@ impl UpgradeCandidateRegion {
mod tests {
use std::assert_matches::assert_matches;
use api::v1::meta::mailbox_message::Payload;
use common_meta::peer::Peer;
use common_time::util::current_time_millis;
use store_api::storage::RegionId;
use super::*;
use crate::error::Error;
use crate::procedure::region_migration::test_util::{
new_close_region_reply, send_mock_reply, TestingEnv,
new_close_region_reply, new_upgrade_region_reply, send_mock_reply, TestingEnv,
};
use crate::procedure::region_migration::{ContextFactory, PersistentContext};
@@ -240,31 +239,6 @@ mod tests {
}
}
fn new_upgrade_region_reply(
id: u64,
ready: bool,
exists: bool,
error: Option<String>,
) -> MailboxMessage {
MailboxMessage {
id,
subject: "mock".to_string(),
from: "datanode".to_string(),
to: "meta".to_string(),
timestamp_millis: current_time_millis(),
payload: Some(Payload::Json(
serde_json::to_string(&InstructionReply::UpgradeRegion(UpgradeRegionReply {
ready,
exists,
error,
}))
.unwrap(),
)),
}
}
#[tokio::test]
async fn test_datanode_is_unreachable() {
let state = UpgradeCandidateRegion::default();
@@ -495,7 +469,7 @@ mod tests {
.unwrap();
});
let next = state.next(&mut ctx).await.unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
let update_metadata = next.as_any().downcast_ref::<UpdateMetadata>().unwrap();
@@ -554,7 +528,7 @@ mod tests {
.unwrap();
});
let next = state.next(&mut ctx).await.unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
let update_metadata = next.as_any().downcast_ref::<UpdateMetadata>().unwrap();
assert_matches!(update_metadata, UpdateMetadata::Rollback);

View File

@@ -93,7 +93,7 @@ impl RegionLeaseKeeper {
let table_ids = tables.keys().copied().collect::<Vec<_>>();
let metadata_subset = {
let _timer = metrics::METRIC_META_LOAD_LEADER_METADATA.start_timer();
let _timer = metrics::METRIC_META_LOAD_LEADER_METADATA_ELAPSED.start_timer();
self.collect_tables_metadata(&table_ids).await?
};
@@ -140,7 +140,7 @@ impl RegionLeaseKeeper {
let table_ids = tables.keys().copied().collect::<Vec<_>>();
let metadata_subset = {
let _timer = metrics::METRIC_META_LOAD_FOLLOWER_METADATA.start_timer();
let _timer = metrics::METRIC_META_LOAD_FOLLOWER_METADATA_ELAPSED.start_timer();
self.collect_tables_metadata(&table_ids).await?
};
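These renames standardize on an `_ELAPSED` suffix for histogram timers. For reference, a sketch of the RAII timer pattern with the prometheus crate; the metric name and labels below are made up for illustration:

    use once_cell::sync::Lazy;
    use prometheus::{register_histogram_vec, HistogramVec};

    static LOAD_METADATA_ELAPSED: Lazy<HistogramVec> = Lazy::new(|| {
        register_histogram_vec!(
            "meta_load_metadata_elapsed",
            "elapsed time of loading metadata",
            &["role"]
        )
        .unwrap()
    });

    fn load_leader_metadata() {
        let _timer = LOAD_METADATA_ELAPSED
            .with_label_values(&["leader"])
            .start_timer();
        // ... work measured here; the duration is observed when `_timer` drops ...
    }

    fn main() {
        load_leader_metadata();
    }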

View File

@@ -55,10 +55,12 @@ impl heartbeat_server::Heartbeat for MetaSrv {
Some(header) => header,
None => {
let err = error::MissingRequestHeaderSnafu {}.build();
tx.send(Err(err.into())).await.expect("working rx");
error!("Exit on malformed request: MissingRequestHeader");
let _ = tx.send(Err(err.into())).await;
break;
}
};
debug!("Receiving heartbeat request: {:?}", req);
if pusher_key.is_none() {
@@ -78,7 +80,10 @@ impl heartbeat_server::Heartbeat for MetaSrv {
is_not_leader = res.as_ref().map_or(false, |r| r.is_not_leader());
debug!("Sending heartbeat response: {:?}", res);
tx.send(res).await.expect("working rx");
if tx.send(res).await.is_err() {
info!("ReceiverStream was dropped; shutting down");
break;
}
}
Err(err) => {
if let Some(io_err) = error::match_for_io_error(&err) {
@@ -89,9 +94,9 @@ impl heartbeat_server::Heartbeat for MetaSrv {
}
}
match tx.send(Err(err)).await {
Ok(_) => (),
Err(_err) => break, // response was dropped
if tx.send(Err(err)).await.is_err() {
info!("ReceiverStream was dropped; shutting down");
break;
}
}
}
@@ -101,10 +106,12 @@ impl heartbeat_server::Heartbeat for MetaSrv {
break;
}
}
info!(
"Heartbeat stream broken: {:?}",
"Heartbeat stream closed: {:?}",
pusher_key.as_ref().unwrap_or(&"unknown".to_string())
);
if let Some(key) = pusher_key {
let _ = handler_group.unregister(&key).await;
}
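The heartbeat fix replaces `expect("working rx")` with a break on send failure, so a disconnected client no longer panics the stream task. A minimal sketch of that shutdown pattern with a tokio mpsc channel:

    use tokio::sync::mpsc;

    #[tokio::main]
    async fn main() {
        let (tx, rx) = mpsc::channel::<u32>(1);
        drop(rx); // simulate the client dropping its ReceiverStream

        for i in 0..3 {
            if tx.send(i).await.is_err() {
                // A failed send means the peer went away; exit instead of panicking.
                println!("receiver dropped; shutting down");
                break;
            }
        }
    }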

View File

@@ -33,7 +33,7 @@ use tonic::{Request, Response};
use crate::error::{self, MissingRequestHeaderSnafu};
use crate::metasrv::MetaSrv;
use crate::metrics::METRIC_META_KV_REQUEST;
use crate::metrics::METRIC_META_KV_REQUEST_ELAPSED;
use crate::service::GrpcResult;
#[async_trait::async_trait]
@@ -48,7 +48,7 @@ impl store_server::Store for MetaSrv {
.cluster_id;
let cluster_id_str = cluster_id.to_string();
let _timer = METRIC_META_KV_REQUEST
let _timer = METRIC_META_KV_REQUEST_ELAPSED
.with_label_values(&[self.kv_backend().name(), "range", cluster_id_str.as_str()])
.start_timer();
@@ -74,7 +74,7 @@ impl store_server::Store for MetaSrv {
.cluster_id;
let cluster_id_str = cluster_id.to_string();
let _timer = METRIC_META_KV_REQUEST
let _timer = METRIC_META_KV_REQUEST_ELAPSED
.with_label_values(&[self.kv_backend().name(), "put", cluster_id_str.as_str()])
.start_timer();
@@ -100,7 +100,7 @@ impl store_server::Store for MetaSrv {
.cluster_id;
let cluster_id_str = cluster_id.to_string();
let _timer = METRIC_META_KV_REQUEST
let _timer = METRIC_META_KV_REQUEST_ELAPSED
.with_label_values(&[
self.kv_backend().name(),
"batch_get",
@@ -130,7 +130,7 @@ impl store_server::Store for MetaSrv {
.cluster_id;
let cluster_id_str = cluster_id.to_string();
let _timer = METRIC_META_KV_REQUEST
let _timer = METRIC_META_KV_REQUEST_ELAPSED
.with_label_values(&[
self.kv_backend().name(),
"batch_pub",
@@ -163,7 +163,7 @@ impl store_server::Store for MetaSrv {
.cluster_id;
let cluster_id_str = cluster_id.to_string();
let _timer = METRIC_META_KV_REQUEST
let _timer = METRIC_META_KV_REQUEST_ELAPSED
.with_label_values(&[
self.kv_backend().name(),
"batch_delete",
@@ -196,7 +196,7 @@ impl store_server::Store for MetaSrv {
.cluster_id;
let cluster_id_str = cluster_id.to_string();
let _timer = METRIC_META_KV_REQUEST
let _timer = METRIC_META_KV_REQUEST_ELAPSED
.with_label_values(&[
self.kv_backend().name(),
"compare_and_put",
@@ -229,7 +229,7 @@ impl store_server::Store for MetaSrv {
.cluster_id;
let cluster_id_str = cluster_id.to_string();
let _timer = METRIC_META_KV_REQUEST
let _timer = METRIC_META_KV_REQUEST_ELAPSED
.with_label_values(&[
self.kv_backend().name(),
"delete_range",

View File

@@ -94,7 +94,8 @@ impl LeaderCachedKvBackend {
/// The caller MUST ensure that no mutation requests reach the `LeaderCachedKvStore` during loading.
pub async fn load(&self) -> Result<()> {
for prefix in &CACHE_KEY_PREFIXES[..] {
let _timer = metrics::METRIC_META_LEADER_CACHED_KV_LOAD.with_label_values(&[prefix]);
let _timer =
metrics::METRIC_META_LEADER_CACHED_KV_LOAD_ELAPSED.with_label_values(&[prefix]);
// TODO(weny): Refactor PaginationStream's output into a unary output.
let stream = PaginationStream::new(
@@ -261,8 +262,9 @@ impl KvBackend for LeaderCachedKvBackend {
.map(|kv| kv.key.clone())
.collect::<HashSet<_>>();
let hit_rate = hit_keys.len() as f64 / req.keys.len() as f64;
metrics::METRIC_META_KV_CACHE_BATCH_GET_HIT_RATE.set(hit_rate);
metrics::METRIC_META_KV_CACHE_HIT
.with_label_values(&[&"batch_get"])
.inc_by(hit_keys.len() as u64);
let missed_keys = req
.keys
@@ -270,6 +272,10 @@ impl KvBackend for LeaderCachedKvBackend {
.filter(|key| !hit_keys.contains(*key))
.cloned()
.collect::<Vec<_>>();
metrics::METRIC_META_KV_CACHE_MISS
.with_label_values(&[&"batch_get"])
.inc_by(missed_keys.len() as u64);
let remote_req = BatchGetRequest { keys: missed_keys };
let ver = self.get_version();

View File

@@ -13,14 +13,13 @@
// limitations under the License.
use api::v1::SemanticType;
use common_query::Output;
use common_telemetry::tracing::warn;
use mito2::engine::MitoEngine;
use snafu::ResultExt;
use store_api::metadata::ColumnMetadata;
use store_api::region_engine::RegionEngine;
use store_api::region_request::{
AddColumn, AlterKind, RegionAlterRequest, RegionPutRequest, RegionRequest,
AddColumn, AffectedRows, AlterKind, RegionAlterRequest, RegionPutRequest, RegionRequest,
};
use store_api::storage::consts::ReservedColumnId;
use store_api::storage::RegionId;
@@ -137,7 +136,7 @@ impl DataRegion {
&self,
region_id: RegionId,
request: RegionPutRequest,
) -> Result<Output> {
) -> Result<AffectedRows> {
let region_id = utils::to_data_region_id(region_id);
self.mito
.handle_request(region_id, RegionRequest::Put(request))

View File

@@ -28,7 +28,7 @@ use common_recordbatch::SendableRecordBatchStream;
use mito2::engine::MitoEngine;
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{RegionEngine, RegionRole, SetReadonlyResponse};
use store_api::region_request::RegionRequest;
use store_api::region_request::{AffectedRows, RegionRequest};
use store_api::storage::{RegionId, ScanRequest};
use tokio::sync::RwLock;
@@ -108,30 +108,20 @@ impl RegionEngine for MetricEngine {
METRIC_ENGINE_NAME
}
/// Handles request to the region.
///
/// Only query is not included, which is handled in `handle_query`
/// Handles non-query request to the region. Returns the count of affected rows.
async fn handle_request(
&self,
region_id: RegionId,
request: RegionRequest,
) -> Result<Output, BoxedError> {
) -> Result<AffectedRows, BoxedError> {
let result = match request {
RegionRequest::Put(put) => self.inner.put_region(region_id, put).await,
RegionRequest::Delete(_) => todo!(),
RegionRequest::Create(create) => self
.inner
.create_region(region_id, create)
.await
.map(|_| Output::AffectedRows(0)),
RegionRequest::Create(create) => self.inner.create_region(region_id, create).await,
RegionRequest::Drop(_) => todo!(),
RegionRequest::Open(_) => todo!(),
RegionRequest::Close(_) => todo!(),
RegionRequest::Alter(alter) => self
.inner
.alter_region(region_id, alter)
.await
.map(|_| Output::AffectedRows(0)),
RegionRequest::Alter(alter) => self.inner.alter_region(region_id, alter).await,
RegionRequest::Flush(_) => todo!(),
RegionRequest::Compact(_) => todo!(),
RegionRequest::Truncate(_) => todo!(),

View File

@@ -14,7 +14,7 @@
use common_telemetry::{error, info};
use snafu::OptionExt;
use store_api::region_request::{AlterKind, RegionAlterRequest};
use store_api::region_request::{AffectedRows, AlterKind, RegionAlterRequest};
use store_api::storage::RegionId;
use crate::engine::MetricEngineInner;
@@ -28,18 +28,21 @@ impl MetricEngineInner {
&self,
region_id: RegionId,
request: RegionAlterRequest,
) -> Result<()> {
) -> Result<AffectedRows> {
let is_altering_logical_region = self
.state
.read()
.await
.physical_regions()
.contains_key(&region_id);
if is_altering_logical_region {
let result = if is_altering_logical_region {
self.alter_physical_region(region_id, request).await
} else {
self.alter_logical_region(region_id, request).await
}
};
result.map(|_| 0)
}
async fn alter_logical_region(

View File

@@ -25,7 +25,7 @@ use object_store::util::join_dir;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::metadata::ColumnMetadata;
use store_api::region_engine::RegionEngine;
use store_api::region_request::{RegionCreateRequest, RegionRequest};
use store_api::region_request::{AffectedRows, RegionCreateRequest, RegionRequest};
use store_api::storage::consts::ReservedColumnId;
use store_api::storage::RegionId;
@@ -50,16 +50,18 @@ impl MetricEngineInner {
&self,
region_id: RegionId,
request: RegionCreateRequest,
) -> Result<()> {
) -> Result<AffectedRows> {
Self::verify_region_create_request(&request)?;
if request.options.contains_key(PHYSICAL_TABLE_METADATA_KEY) {
let result = if request.options.contains_key(PHYSICAL_TABLE_METADATA_KEY) {
self.create_physical_region(region_id, request).await
} else if request.options.contains_key(LOGICAL_TABLE_METADATA_KEY) {
self.create_logical_region(region_id, request).await
} else {
MissingRegionOptionSnafu {}.fail()
}
};
result.map(|_| 0)
}
/// Initialize a physical metric region at given region id.

View File

@@ -17,11 +17,9 @@ use std::hash::{BuildHasher, Hash, Hasher};
use ahash::RandomState;
use api::v1::value::ValueData;
use api::v1::{ColumnDataType, ColumnSchema, Row, Rows, SemanticType};
use common_query::Output;
use common_telemetry::{error, info};
use datatypes::data_type::ConcreteDataType;
use snafu::OptionExt;
use store_api::region_request::RegionPutRequest;
use store_api::region_request::{AffectedRows, RegionPutRequest};
use store_api::storage::{RegionId, TableId};
use crate::consts::{DATA_SCHEMA_TABLE_ID_COLUMN_NAME, DATA_SCHEMA_TSID_COLUMN_NAME, RANDOM_STATE};
@@ -38,7 +36,7 @@ impl MetricEngineInner {
&self,
region_id: RegionId,
request: RegionPutRequest,
) -> Result<Output> {
) -> Result<AffectedRows> {
let is_putting_physical_region = self
.state
.read()
@@ -62,7 +60,7 @@ impl MetricEngineInner {
&self,
logical_region_id: RegionId,
mut request: RegionPutRequest,
) -> Result<Output> {
) -> Result<AffectedRows> {
let physical_region_id = *self
.state
.read()
@@ -208,7 +206,6 @@ impl MetricEngineInner {
#[cfg(test)]
mod tests {
use common_recordbatch::RecordBatches;
use store_api::region_engine::RegionEngine;
use store_api::region_request::RegionRequest;
@@ -231,14 +228,11 @@ mod tests {
// write data
let logical_region_id = env.default_logical_region_id();
let Output::AffectedRows(count) = env
let count = env
.metric()
.handle_request(logical_region_id, request)
.await
.unwrap()
else {
panic!()
};
.unwrap();
assert_eq!(count, 5);
// read data from physical region
@@ -306,13 +300,10 @@ mod tests {
});
// write data
let Output::AffectedRows(count) = engine
let count = engine
.handle_request(logical_region_id, request)
.await
.unwrap()
else {
panic!()
};
.unwrap();
assert_eq!(100, count);
}
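The test rewrites above show the payoff of returning a plain row count: the `let Output::AffectedRows(n) = ... else { panic!() }` destructuring disappears. In miniature, under the assumption that `AffectedRows` is a plain integer alias:

    type AffectedRows = usize;

    async fn handle_request() -> Result<AffectedRows, String> {
        Ok(5) // pretend five rows were written
    }

    #[tokio::main]
    async fn main() {
        // Before the refactor this required destructuring an Output enum.
        let count = handle_request().await.unwrap();
        assert_eq!(count, 5);
    }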

View File

@@ -340,7 +340,6 @@ impl CompactionStatus {
mod tests {
use std::sync::Mutex;
use common_query::Output;
use tokio::sync::oneshot;
use super::*;
@@ -371,7 +370,7 @@ mod tests {
)
.unwrap();
let output = output_rx.await.unwrap().unwrap();
assert!(matches!(output, Output::AffectedRows(0)));
assert_eq!(output, 0);
assert!(scheduler.region_status.is_empty());
// Only one file, picker won't compact it.
@@ -389,7 +388,7 @@ mod tests {
)
.unwrap();
let output = output_rx.await.unwrap().unwrap();
assert!(matches!(output, Output::AffectedRows(0)));
assert_eq!(output, 0);
assert!(scheduler.region_status.is_empty());
}

View File

@@ -18,7 +18,6 @@ use std::sync::Arc;
use std::time::{Duration, Instant};
use common_base::readable_size::ReadableSize;
use common_query::Output;
use common_telemetry::{debug, error, info};
use common_time::timestamp::TimeUnit;
use common_time::timestamp_millis::BucketAligned;
@@ -158,7 +157,7 @@ impl Picker for TwcsPicker {
if outputs.is_empty() && expired_ssts.is_empty() {
// Nothing to compact, we are done. Notifies all waiters as we consume the compaction request.
for waiter in waiters {
waiter.send(Ok(Output::AffectedRows(0)));
waiter.send(Ok(0));
}
return None;
}

View File

@@ -44,14 +44,13 @@ use std::sync::Arc;
use async_trait::async_trait;
use common_error::ext::BoxedError;
use common_query::Output;
use common_recordbatch::SendableRecordBatchStream;
use object_store::manager::ObjectStoreManagerRef;
use snafu::{OptionExt, ResultExt};
use store_api::logstore::LogStore;
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{RegionEngine, RegionRole, SetReadonlyResponse};
use store_api::region_request::RegionRequest;
use store_api::region_request::{AffectedRows, RegionRequest};
use store_api::storage::{RegionId, ScanRequest};
use crate::config::MitoConfig;
@@ -147,7 +146,11 @@ impl EngineInner {
}
/// Handles [RegionRequest] and returns its executed result.
async fn handle_request(&self, region_id: RegionId, request: RegionRequest) -> Result<Output> {
async fn handle_request(
&self,
region_id: RegionId,
request: RegionRequest,
) -> Result<AffectedRows> {
let _timer = HANDLE_REQUEST_ELAPSED
.with_label_values(&[request.type_name()])
.start_timer();
@@ -220,7 +223,7 @@ impl RegionEngine for MitoEngine {
&self,
region_id: RegionId,
request: RegionRequest,
) -> std::result::Result<Output, BoxedError> {
) -> Result<AffectedRows, BoxedError> {
self.inner
.handle_request(region_id, request)
.await

View File

@@ -110,7 +110,7 @@ async fn test_region_replay() {
let engine = env.reopen_engine(engine, MitoConfig::default()).await;
let open_region = engine
let rows = engine
.handle_request(
region_id,
RegionRequest::Open(RegionOpenRequest {
@@ -121,9 +121,6 @@ async fn test_region_replay() {
)
.await
.unwrap();
let Output::AffectedRows(rows) = open_region else {
unreachable!()
};
assert_eq!(0, rows);
let request = ScanRequest::default();

View File

@@ -15,7 +15,6 @@
use std::ops::Range;
use api::v1::{ColumnSchema, Rows};
use common_query::Output;
use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
use datatypes::prelude::ScalarVector;
use datatypes::vectors::TimestampMillisecondVector;
@@ -43,7 +42,7 @@ async fn put_and_flush(
};
put_rows(engine, region_id, rows).await;
let Output::AffectedRows(rows) = engine
let rows = engine
.handle_request(
region_id,
RegionRequest::Flush(RegionFlushRequest {
@@ -51,10 +50,7 @@ async fn put_and_flush(
}),
)
.await
.unwrap()
else {
unreachable!()
};
.unwrap();
assert_eq!(0, rows);
}
@@ -70,20 +66,16 @@ async fn delete_and_flush(
rows: build_rows_for_key("a", rows.start, rows.end, 0),
};
let deleted = engine
let rows_affected = engine
.handle_request(
region_id,
RegionRequest::Delete(RegionDeleteRequest { rows }),
)
.await
.unwrap();
let Output::AffectedRows(rows_affected) = deleted else {
unreachable!()
};
assert_eq!(row_cnt, rows_affected);
let Output::AffectedRows(rows) = engine
let rows = engine
.handle_request(
region_id,
RegionRequest::Flush(RegionFlushRequest {
@@ -91,10 +83,7 @@ async fn delete_and_flush(
}),
)
.await
.unwrap()
else {
unreachable!()
};
.unwrap();
assert_eq!(0, rows);
}
@@ -142,7 +131,7 @@ async fn test_compaction_region() {
.handle_request(region_id, RegionRequest::Compact(RegionCompactRequest {}))
.await
.unwrap();
assert!(matches!(output, Output::AffectedRows(0)));
assert_eq!(output, 0);
let scanner = engine.scanner(region_id, ScanRequest::default()).unwrap();
assert_eq!(

View File

@@ -18,7 +18,6 @@ use std::collections::HashMap;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use common_query::Output;
use common_telemetry::{error, info};
use snafu::ResultExt;
use store_api::storage::RegionId;
@@ -214,7 +213,7 @@ impl RegionFlushTask {
/// Consumes the task and notifies the senders that the job succeeded.
fn on_success(self) {
for sender in self.senders {
sender.send(Ok(Output::AffectedRows(0)));
sender.send(Ok(0));
}
}
@@ -736,7 +735,7 @@ mod tests {
.unwrap();
assert!(scheduler.region_status.is_empty());
let output = output_rx.await.unwrap().unwrap();
assert!(matches!(output, Output::AffectedRows(0)));
assert_eq!(output, 0);
assert!(scheduler.region_status.is_empty());
}
}

View File

@@ -16,7 +16,6 @@ use std::mem;
use std::sync::Arc;
use api::v1::{Mutation, OpType, Rows, WalEntry};
use common_query::Output;
use snafu::ResultExt;
use store_api::logstore::LogStore;
use store_api::storage::{RegionId, SequenceNumber};
@@ -57,8 +56,7 @@ impl WriteNotify {
.send_mut(Err(err.clone()).context(WriteGroupSnafu));
} else {
// Send success result.
self.sender
.send_mut(Ok(Output::AffectedRows(self.num_rows)));
self.sender.send_mut(Ok(self.num_rows));
}
}
}

View File

@@ -23,8 +23,6 @@ use api::helper::{
ColumnDataTypeWrapper,
};
use api::v1::{ColumnDataType, ColumnSchema, OpType, Rows, SemanticType, Value};
use common_query::Output;
use common_query::Output::AffectedRows;
use common_telemetry::{info, warn};
use datatypes::prelude::DataType;
use prometheus::HistogramTimer;
@@ -34,8 +32,9 @@ use snafu::{ensure, OptionExt, ResultExt};
use store_api::metadata::{ColumnMetadata, RegionMetadata};
use store_api::region_engine::SetReadonlyResponse;
use store_api::region_request::{
RegionAlterRequest, RegionCloseRequest, RegionCompactRequest, RegionCreateRequest,
RegionDropRequest, RegionFlushRequest, RegionOpenRequest, RegionRequest, RegionTruncateRequest,
AffectedRows, RegionAlterRequest, RegionCloseRequest, RegionCompactRequest,
RegionCreateRequest, RegionDropRequest, RegionFlushRequest, RegionOpenRequest, RegionRequest,
RegionTruncateRequest,
};
use store_api::storage::{RegionId, SequenceNumber};
use tokio::sync::oneshot::{self, Receiver, Sender};
@@ -384,16 +383,16 @@ pub(crate) fn validate_proto_value(
/// Oneshot output result sender.
#[derive(Debug)]
pub(crate) struct OutputTx(Sender<Result<Output>>);
pub(crate) struct OutputTx(Sender<Result<AffectedRows>>);
impl OutputTx {
/// Creates a new output sender.
pub(crate) fn new(sender: Sender<Result<Output>>) -> OutputTx {
pub(crate) fn new(sender: Sender<Result<AffectedRows>>) -> OutputTx {
OutputTx(sender)
}
/// Sends the `result`.
pub(crate) fn send(self, result: Result<Output>) {
pub(crate) fn send(self, result: Result<AffectedRows>) {
// Ignores send result.
let _ = self.0.send(result);
}
@@ -415,14 +414,14 @@ impl OptionOutputTx {
}
/// Sends the `result` and consumes the inner sender.
pub(crate) fn send_mut(&mut self, result: Result<Output>) {
pub(crate) fn send_mut(&mut self, result: Result<AffectedRows>) {
if let Some(sender) = self.0.take() {
sender.send(result);
}
}
/// Sends the `result` and consumes the sender.
pub(crate) fn send(mut self, result: Result<Output>) {
pub(crate) fn send(mut self, result: Result<AffectedRows>) {
if let Some(sender) = self.0.take() {
sender.send(result);
}
@@ -434,8 +433,8 @@ impl OptionOutputTx {
}
}
impl From<Sender<Result<Output>>> for OptionOutputTx {
fn from(sender: Sender<Result<Output>>) -> Self {
impl From<Sender<Result<AffectedRows>>> for OptionOutputTx {
fn from(sender: Sender<Result<AffectedRows>>) -> Self {
Self::new(Some(OutputTx::new(sender)))
}
}
@@ -494,7 +493,7 @@ impl WorkerRequest {
pub(crate) fn try_from_region_request(
region_id: RegionId,
value: RegionRequest,
) -> Result<(WorkerRequest, Receiver<Result<Output>>)> {
) -> Result<(WorkerRequest, Receiver<Result<AffectedRows>>)> {
let (sender, receiver) = oneshot::channel();
let worker_request = match value {
RegionRequest::Put(v) => {
@@ -630,7 +629,7 @@ impl FlushFinished {
/// Marks the flush job as successful and observes the timer.
pub(crate) fn on_success(self) {
for sender in self.senders {
sender.send(Ok(Output::AffectedRows(0)));
sender.send(Ok(0));
}
}
}
@@ -685,7 +684,7 @@ impl CompactionFinished {
COMPACTION_ELAPSED_TOTAL.observe(self.start_time.elapsed().as_secs_f64());
for sender in self.senders {
sender.send(Ok(AffectedRows(0)));
sender.send(Ok(0));
}
info!("Successfully compacted region: {}", self.region_id);
}
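`OutputTx` wraps a oneshot sender and deliberately ignores the send result, since the requester may have stopped waiting. A self-contained sketch of that wrapper, with a simplified error type:

    use tokio::sync::oneshot;

    type AffectedRows = usize;

    struct OutputTx(oneshot::Sender<Result<AffectedRows, String>>);

    impl OutputTx {
        fn send(self, result: Result<AffectedRows, String>) {
            // A dropped receiver just means nobody is waiting; ignore the error.
            let _ = self.0.send(result);
        }
    }

    #[tokio::main]
    async fn main() {
        let (tx, rx) = oneshot::channel();
        OutputTx(tx).send(Ok(3));
        assert_eq!(rx.await.unwrap().unwrap(), 3);
    }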

View File

@@ -596,13 +596,10 @@ pub fn delete_rows_schema(request: &RegionCreateRequest) -> Vec<api::v1::ColumnS
/// Put rows into the engine.
pub async fn put_rows(engine: &MitoEngine, region_id: RegionId, rows: Rows) {
let num_rows = rows.rows.len();
let output = engine
let rows_inserted = engine
.handle_request(region_id, RegionRequest::Put(RegionPutRequest { rows }))
.await
.unwrap();
let Output::AffectedRows(rows_inserted) = output else {
unreachable!()
};
assert_eq!(num_rows, rows_inserted);
}
@@ -645,31 +642,25 @@ pub fn build_delete_rows_for_key(key: &str, start: usize, end: usize) -> Vec<Row
/// Delete rows from the engine.
pub async fn delete_rows(engine: &MitoEngine, region_id: RegionId, rows: Rows) {
let num_rows = rows.rows.len();
let output = engine
let rows_inserted = engine
.handle_request(
region_id,
RegionRequest::Delete(RegionDeleteRequest { rows }),
)
.await
.unwrap();
let Output::AffectedRows(rows_inserted) = output else {
unreachable!()
};
assert_eq!(num_rows, rows_inserted);
}
/// Flush a region manually.
pub async fn flush_region(engine: &MitoEngine, region_id: RegionId, row_group_size: Option<usize>) {
let Output::AffectedRows(rows) = engine
let rows = engine
.handle_request(
region_id,
RegionRequest::Flush(RegionFlushRequest { row_group_size }),
)
.await
.unwrap()
else {
unreachable!()
};
.unwrap();
assert_eq!(0, rows);
}

View File

@@ -16,7 +16,6 @@
use std::sync::Arc;
use common_query::Output;
use common_telemetry::{debug, error, info, warn};
use snafu::ResultExt;
use store_api::metadata::{RegionMetadata, RegionMetadataBuilder, RegionMetadataRef};
@@ -54,7 +53,7 @@ impl<S> RegionWorkerLoop<S> {
region_id, version.metadata.schema_version, request.schema_version
);
// Returns if it altered.
sender.send(Ok(Output::AffectedRows(0)));
sender.send(Ok(0));
return;
}
// Validate request.
@@ -69,7 +68,7 @@ impl<S> RegionWorkerLoop<S> {
"Ignores alter request as it alters nothing, region_id: {}, request: {:?}",
region_id, request
);
sender.send(Ok(Output::AffectedRows(0)));
sender.send(Ok(0));
return;
}
@@ -118,7 +117,7 @@ impl<S> RegionWorkerLoop<S> {
);
// Notifies waiters.
sender.send(Ok(Output::AffectedRows(0)));
sender.send(Ok(0));
}
}

View File

@@ -14,8 +14,8 @@
//! Handling close request.
use common_query::Output;
use common_telemetry::info;
use store_api::region_request::AffectedRows;
use store_api::storage::RegionId;
use crate::error::Result;
@@ -23,9 +23,12 @@ use crate::metrics::REGION_COUNT;
use crate::worker::RegionWorkerLoop;
impl<S> RegionWorkerLoop<S> {
pub(crate) async fn handle_close_request(&mut self, region_id: RegionId) -> Result<Output> {
pub(crate) async fn handle_close_request(
&mut self,
region_id: RegionId,
) -> Result<AffectedRows> {
let Some(region) = self.regions.get_region(region_id) else {
return Ok(Output::AffectedRows(0));
return Ok(0);
};
info!("Try to close region {}", region_id);
@@ -41,6 +44,6 @@ impl<S> RegionWorkerLoop<S> {
REGION_COUNT.dec();
Ok(Output::AffectedRows(0))
Ok(0)
}
}

View File

@@ -16,12 +16,11 @@
use std::sync::Arc;
use common_query::Output;
use common_telemetry::info;
use snafu::ResultExt;
use store_api::logstore::LogStore;
use store_api::metadata::RegionMetadataBuilder;
use store_api::region_request::RegionCreateRequest;
use store_api::region_request::{AffectedRows, RegionCreateRequest};
use store_api::storage::RegionId;
use crate::error::{InvalidMetadataSnafu, Result};
@@ -34,7 +33,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
&mut self,
region_id: RegionId,
request: RegionCreateRequest,
) -> Result<Output> {
) -> Result<AffectedRows> {
// Checks whether the region already exists.
if let Some(region) = self.regions.get_region(region_id) {
// Region already exists.
@@ -45,7 +44,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
&request.primary_key,
)?;
return Ok(Output::AffectedRows(0));
return Ok(0);
}
// Convert the request into a RegionMetadata and validate it.
@@ -76,6 +75,6 @@ impl<S: LogStore> RegionWorkerLoop<S> {
// Insert the MitoRegion into the RegionMap.
self.regions.insert_region(Arc::new(region));
Ok(Output::AffectedRows(0))
Ok(0)
}
}

View File

@@ -16,12 +16,12 @@
use std::time::Duration;
use common_query::Output;
use common_telemetry::{info, warn};
use futures::TryStreamExt;
use object_store::util::join_path;
use object_store::{EntryMode, ObjectStore};
use snafu::ResultExt;
use store_api::region_request::AffectedRows;
use store_api::storage::RegionId;
use tokio::time::sleep;
@@ -34,7 +34,10 @@ const GC_TASK_INTERVAL_SEC: u64 = 5 * 60; // 5 minutes
const MAX_RETRY_TIMES: u64 = 288; // 24 hours (5m * 288)
impl<S> RegionWorkerLoop<S> {
pub(crate) async fn handle_drop_request(&mut self, region_id: RegionId) -> Result<Output> {
pub(crate) async fn handle_drop_request(
&mut self,
region_id: RegionId,
) -> Result<AffectedRows> {
let region = self.regions.writable_region(region_id)?;
info!("Try to drop region: {}", region_id);
@@ -86,7 +89,7 @@ impl<S> RegionWorkerLoop<S> {
listener.on_later_drop_end(region_id, removed);
});
Ok(Output::AffectedRows(0))
Ok(0)
}
}

View File

@@ -16,12 +16,11 @@
use std::sync::Arc;
use common_query::Output;
use common_telemetry::info;
use object_store::util::join_path;
use snafu::{OptionExt, ResultExt};
use store_api::logstore::LogStore;
use store_api::region_request::RegionOpenRequest;
use store_api::region_request::{AffectedRows, RegionOpenRequest};
use store_api::storage::RegionId;
use crate::error::{ObjectStoreNotFoundSnafu, OpenDalSnafu, RegionNotFoundSnafu, Result};
@@ -35,9 +34,9 @@ impl<S: LogStore> RegionWorkerLoop<S> {
&mut self,
region_id: RegionId,
request: RegionOpenRequest,
) -> Result<Output> {
) -> Result<AffectedRows> {
if self.regions.is_region_exists(region_id) {
return Ok(Output::AffectedRows(0));
return Ok(0);
}
let object_store = if let Some(storage_name) = request.options.get("storage") {
self.object_store_manager
@@ -82,6 +81,6 @@ impl<S: LogStore> RegionWorkerLoop<S> {
// Insert the MitoRegion into the RegionMap.
self.regions.insert_region(Arc::new(region));
Ok(Output::AffectedRows(0))
Ok(0)
}
}

View File

@@ -14,9 +14,9 @@
//! Handling truncate related requests.
use common_query::Output;
use common_telemetry::info;
use store_api::logstore::LogStore;
use store_api::region_request::AffectedRows;
use store_api::storage::RegionId;
use crate::error::Result;
@@ -24,7 +24,10 @@ use crate::manifest::action::{RegionMetaAction, RegionMetaActionList, RegionTrun
use crate::worker::RegionWorkerLoop;
impl<S: LogStore> RegionWorkerLoop<S> {
pub(crate) async fn handle_truncate_request(&mut self, region_id: RegionId) -> Result<Output> {
pub(crate) async fn handle_truncate_request(
&mut self,
region_id: RegionId,
) -> Result<AffectedRows> {
let region = self.regions.writable_region(region_id)?;
info!("Try to truncate region {}", region_id);
@@ -62,6 +65,6 @@ impl<S: LogStore> RegionWorkerLoop<S> {
region_id, truncated_entry_id, truncated_sequence
);
Ok(Output::AffectedRows(0))
Ok(0)
}
}


@@ -147,13 +147,12 @@ impl ErrorExt for Error {
| Deserialize { .. }
| FunctionInvalidArgument { .. }
| UnsupportedVectorMatch { .. }
| CombineTableColumnMismatch { .. } => StatusCode::InvalidArguments,
UnknownTable { .. }
| CombineTableColumnMismatch { .. }
| DataFusionPlanning { .. }
| UnexpectedPlanExpr { .. }
| IllegalRange { .. }
| EmptyRange { .. } => StatusCode::Internal,
| IllegalRange { .. } => StatusCode::InvalidArguments,
UnknownTable { .. } | EmptyRange { .. } => StatusCode::Internal,
TableNameNotFound { .. } => StatusCode::TableNotFound,
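The regrouping narrows `Internal` to states a user cannot trigger: planning failures, unexpected plan expressions, and illegal ranges originate from the query text, so they now surface as `InvalidArguments`. A reduced sketch of the resulting mapping, with the error enum abbreviated to the variants visible in the diff:

    enum StatusCode {
        InvalidArguments,
        Internal,
        TableNotFound,
    }

    enum Error {
        DataFusionPlanning,
        UnexpectedPlanExpr,
        IllegalRange,
        UnknownTable,
        EmptyRange,
        TableNameNotFound,
    }

    fn status_code(e: &Error) -> StatusCode {
        use Error::*;
        match e {
            // Reachable from user-supplied PromQL: client errors.
            DataFusionPlanning | UnexpectedPlanExpr | IllegalRange => StatusCode::InvalidArguments,
            // Inconsistencies inside the engine itself.
            UnknownTable | EmptyRange => StatusCode::Internal,
            TableNameNotFound => StatusCode::TableNotFound,
        }
    }

    fn main() {
        assert!(matches!(status_code(&Error::IllegalRange), StatusCode::InvalidArguments));
    }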


@@ -97,7 +97,7 @@ pub(crate) fn linear_regression(
const_y = false;
}
count += 1.0;
let x = time - intercept_time as f64 / 1e3;
let x = (time - intercept_time as f64) / 1e3f64;
(sum_x, comp_x) = compensated_sum_inc(x, sum_x, comp_x);
(sum_y, comp_y) = compensated_sum_inc(value, sum_y, comp_y);
(sum_xy, comp_xy) = compensated_sum_inc(x * value, sum_xy, comp_xy);
@@ -188,8 +188,12 @@ mod test {
0.0, 10.0, 20.0, 30.0, 40.0, 0.0, 10.0, 20.0, 30.0, 40.0, 50.0,
]);
let (slope, intercept) = linear_regression(&ts_array, &values_array, ts_array.value(0));
assert_eq!(slope, Some(0.010606060606060607));
assert_eq!(intercept, Some(6.818181818181818));
assert_eq!(slope, Some(10.606060606060607));
assert_eq!(intercept, Some(6.818181818181815));
let (slope, intercept) = linear_regression(&ts_array, &values_array, 3000);
assert_eq!(slope, Some(10.606060606060607));
assert_eq!(intercept, Some(38.63636363636364));
}
#[test]
@@ -219,8 +223,8 @@ mod test {
.into_iter()
.collect();
let (slope, intercept) = linear_regression(&ts_array, &values_array, ts_array.value(0));
assert_eq!(slope, Some(0.010606060606060607));
assert_eq!(intercept, Some(6.818181818181818));
assert_eq!(slope, Some(10.606060606060607));
assert_eq!(intercept, Some(6.818181818181815));
}
#[test]
@@ -231,4 +235,18 @@ mod test {
assert_eq!(slope, None);
assert_eq!(intercept, None);
}
// From prometheus `promql/functions_test.go` case `TestKahanSum`
#[test]
fn test_kahan_sum() {
let inputs = vec![1.0, 10.0f64.powf(100.0), 1.0, -1.0 * 10.0f64.powf(100.0)];
let mut sum = 0.0;
let mut c = 0f64;
for v in inputs {
(sum, c) = compensated_sum_inc(v, sum, c);
}
assert_eq!(sum + c, 2.0)
}
}
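Two things changed in this file. First, the `x` computation had an operator-precedence bug: `time - intercept_time as f64 / 1e3` parses as `time - ((intercept_time as f64) / 1e3)`, so only the reference timestamp was scaled from milliseconds to seconds; the fix scales the difference itself, which is why the expected slopes in the tests grow by exactly 1000x (0.0106... per millisecond becomes 10.606... per second). Second, the sums are compensated (Kahan/Neumaier) sums, which the new `test_kahan_sum` exercises. A self-contained sketch consistent with the `(sum, c) = compensated_sum_inc(v, sum, c)` call shape above; the crate's actual implementation may differ:

    /// Neumaier-style compensated addition: capture the low-order bits lost
    /// when adding `inc` to `sum`, to be folded back in later via `sum + c`.
    fn compensated_sum_inc(inc: f64, sum: f64, mut compensation: f64) -> (f64, f64) {
        let new_sum = sum + inc;
        if sum.abs() >= inc.abs() {
            // Low-order digits of `inc` were lost in the addition.
            compensation += (sum - new_sum) + inc;
        } else {
            // Low-order digits of `sum` were lost in the addition.
            compensation += (inc - new_sum) + sum;
        }
        (new_sum, compensation)
    }

    fn main() {
        // Mirrors `test_kahan_sum`: naive summation of these inputs collapses
        // to 0.0, while the compensated result recovers 2.0.
        let inputs = [1.0, 1e100, 1.0, -1e100];
        let (mut sum, mut c) = (0.0, 0.0);
        for v in inputs {
            (sum, c) = compensated_sum_inc(v, sum, c);
        }
        assert_eq!(sum + c, 2.0);
    }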


@@ -42,6 +42,8 @@ pub fn deriv(times: &TimestampMillisecondArray, values: &Float64Array) -> Option
#[cfg(test)]
mod test {
use std::sync::Arc;
use super::*;
use crate::functions::test_util::simple_range_udf_runner;
@@ -73,7 +75,32 @@ mod test {
Deriv::scalar_udf(),
ts_array,
value_array,
vec![Some(0.010606060606060607), None],
vec![Some(10.606060606060607), None],
);
}
// From prometheus `promql/functions_test.go` case `TestDeriv`
#[test]
fn complicate_deriv() {
let start = 1493712816939;
let interval = 30 * 1000;
let mut ts_data = vec![];
for i in 0..15 {
let jitter = 12 * i % 2;
ts_data.push(Some(start + interval * i + jitter));
}
let val_data = vec![Some(1.0); 15];
let ts_array = Arc::new(TimestampMillisecondArray::from_iter(ts_data));
let val_array = Arc::new(Float64Array::from_iter(val_data));
let range = [(0, 15)];
let ts_range_array = RangeArray::from_ranges(ts_array, range).unwrap();
let value_range_array = RangeArray::from_ranges(val_array, range).unwrap();
simple_range_udf_runner(
Deriv::scalar_udf(),
ts_range_array,
value_range_array,
vec![Some(0.0)],
);
}
}
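As in PromQL, `deriv` is the least-squares slope of the samples over the range, with x measured in seconds relative to a reference timestamp: slope = (n*Σxy - Σx*Σy) / (n*Σx² - (Σx)²). For the constant series in `complicate_deriv` every value is 1.0, so the numerator cancels exactly and the slope is 0.0 regardless of timestamp jitter. A bare, uncompensated sketch of that formula (the real code also computes the intercept and uses the compensated sums shown above):

    /// Plain least-squares slope over (timestamp_ms, value) samples, x in
    /// seconds relative to the first sample.
    fn slope(ts_ms: &[i64], values: &[f64]) -> f64 {
        let n = ts_ms.len() as f64;
        let t0 = ts_ms[0];
        let (mut sx, mut sy, mut sxy, mut sxx) = (0.0, 0.0, 0.0, 0.0);
        for (&t, &y) in ts_ms.iter().zip(values) {
            let x = (t - t0) as f64 / 1e3;
            sx += x;
            sy += y;
            sxy += x * y;
            sxx += x * x;
        }
        (n * sxy - sx * sy) / (n * sxx - sx * sx)
    }

    fn main() {
        // Constant series => slope exactly 0.0, matching `complicate_deriv`.
        let ts: Vec<i64> = (0..15).map(|i| 1493712816939 + 30_000 * i).collect();
        let ys = vec![1.0; 15];
        assert_eq!(slope(&ts, &ys), 0.0);
    }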


@@ -30,6 +30,7 @@ use crate::functions::{extract_array, linear_regression};
use crate::range_array::RangeArray;
pub struct PredictLinear {
/// Duration. The second param of (`predict_linear(v range-vector, t scalar)`).
t: i64,
}
@@ -147,8 +148,9 @@ fn predict_linear_impl(
return None;
}
let intercept_time = timestamps.value(0);
let (slope, intercept) = linear_regression(timestamps, values, intercept_time);
// last timestamp is evaluation timestamp
let evaluate_ts = timestamps.value(timestamps.len() - 1);
let (slope, intercept) = linear_regression(timestamps, values, evaluate_ts);
if slope.is_none() || intercept.is_none() {
return None;
@@ -210,7 +212,7 @@ mod test {
ts_array,
value_array,
// value at t = 0
vec![Some(6.818181818181818)],
vec![Some(38.63636363636364)],
);
}
@@ -222,7 +224,7 @@ mod test {
ts_array,
value_array,
// value at t = 3000
vec![Some(38.63636363636364)],
vec![Some(31856.818181818187)],
);
}
@@ -234,7 +236,7 @@ mod test {
ts_array,
value_array,
// value at t = 4200
vec![Some(51.36363636363637)],
vec![Some(44584.09090909091)],
);
}
@@ -246,7 +248,7 @@ mod test {
ts_array,
value_array,
// value at t = 6600
vec![Some(76.81818181818181)],
vec![Some(70038.63636363638)],
);
}
@@ -258,7 +260,7 @@ mod test {
ts_array,
value_array,
// value at t = 7800
vec![Some(89.54545454545455)],
vec![Some(82765.9090909091)],
);
}
}
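The behavioral fix here: the regression is now anchored at the evaluation timestamp (the last sample) rather than the first, matching PromQL, so `predict_linear(v, t)` extrapolates the fitted line `t` seconds past the evaluation time; that is what moves the expected values in the tests. A trivial sketch of the final extrapolation step:

    /// predict_linear's last step: with (slope, intercept) anchored at the
    /// evaluation timestamp, extrapolate `t` seconds ahead.
    fn extrapolate(slope: f64, intercept: f64, t_seconds: f64) -> f64 {
        intercept + slope * t_seconds
    }

    fn main() {
        // Fit from the updated tests: slope ~ 10.606, intercept at the
        // evaluation timestamp ~ 38.636; t = 3000 s predicts ~ 31856.8.
        let predicted = extrapolate(10.606060606060607, 38.63636363636364, 3000.0);
        assert!((predicted - 31856.818181818187).abs() < 1e-6);
    }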


@@ -1017,7 +1017,8 @@ impl PromPlanner {
}
"predict_linear" => {
let t_expr = match other_input_exprs.pop_front() {
Some(DfExpr::Literal(ScalarValue::Time64Microsecond(Some(t)))) => t,
Some(DfExpr::Literal(ScalarValue::Float64(Some(t)))) => t as i64,
Some(DfExpr::Literal(ScalarValue::Int64(Some(t)))) => t,
other => UnexpectedPlanExprSnafu {
desc: format!("expect i64 literal as t, but found {:?}", other),
}


@@ -1 +1 @@
v0.4.1
v0.4.2


@@ -323,10 +323,7 @@ pub(crate) fn check(
// get GreptimeDB's version.
fn get_version() -> String {
format!(
"{}-greptime",
env::var("CARGO_PKG_VERSION").unwrap_or_else(|_| "unknown".to_string()),
)
format!("{}-greptime", env!("CARGO_PKG_VERSION"))
}
#[cfg(test)]
mod test {

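The root cause of the wrong `version()` result: `env::var` reads the process environment at runtime, where `CARGO_PKG_VERSION` is normally absent (Cargo sets it for the compiler invocation, not for deployed binaries), so the old code presumably fell back to `unknown-greptime` in production. The `env!` macro bakes the value in at compile time. A contrast sketch:

    use std::env;

    /// Old behavior: runtime lookup whose fallback fires outside `cargo run`.
    fn get_version_runtime() -> String {
        format!(
            "{}-greptime",
            env::var("CARGO_PKG_VERSION").unwrap_or_else(|_| "unknown".to_string())
        )
    }

    /// New behavior: the version is captured when the crate is compiled.
    fn get_version_compile_time() -> String {
        format!("{}-greptime", env!("CARGO_PKG_VERSION"))
    }

    fn main() {
        // Launched directly, the first usually prints "unknown-greptime"; the
        // second always embeds the crate version, e.g. "0.4.2-greptime" after
        // the version bump above.
        println!("{}", get_version_runtime());
        println!("{}", get_version_compile_time());
    }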

@@ -451,7 +451,6 @@ async fn test_query_concurrently() -> Result<()> {
Ok(())
}
#[ignore = "https://github.com/GreptimeTeam/greptimedb/issues/1385"]
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_query_prepared() -> Result<()> {
common_telemetry::init_default_ut_logging();


@@ -170,6 +170,8 @@ impl<'a> ParserContext<'a> {
}
);
}
// Sorts options so that `test_display_create_table` can always pass.
let options = options.into_iter().sorted().collect();
let create_table = CreateTable {
if_not_exists,
name: table_name,

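The `.sorted()` call (from itertools, assumed from the call shape) gives the parsed `WITH(...)` options a canonical order, so the `Display` output of a `CREATE TABLE` statement, and hence `test_display_create_table`, no longer depends on the order in which options were written. A minimal sketch:

    use itertools::Itertools; // assumed dependency, implied by `.sorted()`

    fn main() {
        // Options arrive in user order; sorting makes round-trips deterministic.
        let options = vec![("ttl", "7d"), ("regions", "1"), ("storage", "File")];
        let options: Vec<_> = options.into_iter().sorted().collect();
        // Matches the ordering in the expected SQL below: regions, storage, ttl.
        assert_eq!(
            options,
            vec![("regions", "1"), ("storage", "File"), ("ttl", "7d")]
        );
    }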

@@ -241,7 +241,7 @@ mod tests {
PARTITION r2 VALUES LESS THAN (MAXVALUE),
)
engine=mito
with(regions=1, ttl='7d');
with(regions=1, ttl='7d', storage='File');
";
let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}).unwrap();
assert_eq!(1, result.len());
@@ -267,6 +267,7 @@ PARTITION BY RANGE COLUMNS (ts) (
ENGINE=mito
WITH(
regions = 1,
storage = 'File',
ttl = '7d'
)"#,
&new_sql


@@ -19,13 +19,12 @@ use std::sync::Arc;
use api::greptime_proto::v1::meta::{GrantedRegion as PbGrantedRegion, RegionRole as PbRegionRole};
use async_trait::async_trait;
use common_error::ext::BoxedError;
use common_query::Output;
use common_recordbatch::SendableRecordBatchStream;
use serde::{Deserialize, Serialize};
use crate::logstore::entry;
use crate::metadata::RegionMetadataRef;
use crate::region_request::RegionRequest;
use crate::region_request::{AffectedRows, RegionRequest};
use crate::storage::{RegionId, ScanRequest};
/// The result of setting readonly for the region.
@@ -114,14 +113,12 @@ pub trait RegionEngine: Send + Sync {
/// Name of this engine
fn name(&self) -> &str;
/// Handles request to the region.
///
/// Only query is not included, which is handled in `handle_query`
/// Handles non-query request to the region. Returns the count of affected rows.
async fn handle_request(
&self,
region_id: RegionId,
request: RegionRequest,
) -> Result<Output, BoxedError>;
) -> Result<AffectedRows, BoxedError>;
/// Handles substrait query and return a stream of record batches
async fn handle_query(

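This is the trait-level counterpart of the handler changes above: `RegionEngine::handle_request` now returns the affected-row count directly, and the doc comment states the query/non-query split plainly. A compiling sketch of the reshaped boundary, with the heavy types reduced to stand-ins (the real definitions live in `store_api` and `common_recordbatch`):

    type AffectedRows = usize;
    type BoxedError = Box<dyn std::error::Error + Send + Sync>;

    // Illustrative stand-ins for the types imported in the diff.
    struct RegionId(u64);
    enum RegionRequest {}
    struct ScanRequest;
    struct SendableRecordBatchStream;

    #[async_trait::async_trait]
    trait RegionEngine: Send + Sync {
        /// Name of this engine.
        fn name(&self) -> &str;

        /// Handles a non-query request to the region; returns affected rows.
        async fn handle_request(
            &self,
            region_id: RegionId,
            request: RegionRequest,
        ) -> Result<AffectedRows, BoxedError>;

        /// Handles a substrait query and returns a stream of record batches.
        async fn handle_query(
            &self,
            region_id: RegionId,
            request: ScanRequest,
        ) -> Result<SendableRecordBatchStream, BoxedError>;
    }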

@@ -28,6 +28,8 @@ use crate::metadata::{
use crate::path_utils::region_dir;
use crate::storage::{ColumnId, RegionId, ScanRequest};
pub type AffectedRows = usize;
#[derive(Debug, IntoStaticStr)]
pub enum RegionRequest {
Put(RegionPutRequest),


@@ -84,6 +84,7 @@ pub struct TableOptions {
pub const WRITE_BUFFER_SIZE_KEY: &str = "write_buffer_size";
pub const TTL_KEY: &str = "ttl";
pub const REGIONS_KEY: &str = "regions";
pub const STORAGE_KEY: &str = "storage";
impl TryFrom<&HashMap<String, String>> for TableOptions {
type Error = error::Error;
@@ -340,6 +341,7 @@ pub fn valid_table_option(key: &str) -> bool {
| WRITE_BUFFER_SIZE_KEY
| TTL_KEY
| REGIONS_KEY
| STORAGE_KEY
) | is_supported_in_s3(key)
}
@@ -365,6 +367,7 @@ mod tests {
assert!(valid_table_option(TTL_KEY));
assert!(valid_table_option(REGIONS_KEY));
assert!(valid_table_option(WRITE_BUFFER_SIZE_KEY));
assert!(valid_table_option(STORAGE_KEY));
assert!(!valid_table_option("foo"));
}
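`storage` joins the whitelist of recognized `WITH(...)` keys, which is what lets `with(storage='S3')` parse in the tests below. A condensed sketch of the validation; `is_supported_in_s3` is stubbed here (the real helper presumably whitelists S3 connection options):

    const WRITE_BUFFER_SIZE_KEY: &str = "write_buffer_size";
    const TTL_KEY: &str = "ttl";
    const REGIONS_KEY: &str = "regions";
    const STORAGE_KEY: &str = "storage";

    // Stub for illustration only.
    fn is_supported_in_s3(_key: &str) -> bool {
        false
    }

    fn valid_table_option(key: &str) -> bool {
        matches!(
            key,
            WRITE_BUFFER_SIZE_KEY | TTL_KEY | REGIONS_KEY | STORAGE_KEY
        ) | is_supported_in_s3(key)
    }

    fn main() {
        assert!(valid_table_option(STORAGE_KEY));
        assert!(!valid_table_option("foo"));
    }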


@@ -68,6 +68,7 @@ pub struct GreptimeDbClusterBuilder {
cluster_name: String,
kv_backend: KvBackendRef,
store_config: Option<ObjectStoreConfig>,
store_providers: Option<Vec<StorageType>>,
datanodes: Option<u32>,
}
@@ -92,6 +93,7 @@ impl GreptimeDbClusterBuilder {
cluster_name: cluster_name.to_string(),
kv_backend,
store_config: None,
store_providers: None,
datanodes: None,
}
}
@@ -101,6 +103,11 @@ impl GreptimeDbClusterBuilder {
self
}
pub fn with_store_providers(mut self, store_providers: Vec<StorageType>) -> Self {
self.store_providers = Some(store_providers);
self
}
pub fn with_datanodes(mut self, datanodes: u32) -> Self {
self.datanodes = Some(datanodes);
self
@@ -176,14 +183,15 @@ impl GreptimeDbClusterBuilder {
dir_guards.push(FileDirGuard::new(home_tmp_dir));
create_datanode_opts(store_config.clone(), home_dir)
create_datanode_opts(store_config.clone(), vec![], home_dir)
} else {
let (opts, guard) = create_tmp_dir_and_datanode_opts(
StorageType::File,
self.store_providers.clone().unwrap_or_default(),
&format!("{}-dn-{}", self.cluster_name, datanode_id),
);
storage_guards.push(guard.storage_guard);
storage_guards.push(guard.storage_guards);
dir_guards.push(guard.home_guard);
opts
@@ -195,7 +203,11 @@ impl GreptimeDbClusterBuilder {
instances.insert(datanode_id, datanode);
}
(instances, storage_guards, dir_guards)
(
instances,
storage_guards.into_iter().flatten().collect(),
dir_guards,
)
}
async fn wait_datanodes_alive(


@@ -40,7 +40,8 @@ pub struct GreptimeDbStandalone {
pub struct GreptimeDbStandaloneBuilder {
instance_name: String,
store_type: Option<StorageType>,
store_providers: Option<Vec<StorageType>>,
default_store: Option<StorageType>,
plugin: Option<Plugins>,
}
@@ -48,14 +49,23 @@ impl GreptimeDbStandaloneBuilder {
pub fn new(instance_name: &str) -> Self {
Self {
instance_name: instance_name.to_string(),
store_type: None,
store_providers: None,
plugin: None,
default_store: None,
}
}
pub fn with_store_type(self, store_type: StorageType) -> Self {
pub fn with_default_store_type(self, store_type: StorageType) -> Self {
Self {
store_type: Some(store_type),
default_store: Some(store_type),
..self
}
}
#[cfg(test)]
pub fn with_store_providers(self, store_providers: Vec<StorageType>) -> Self {
Self {
store_providers: Some(store_providers),
..self
}
}
@@ -69,9 +79,11 @@ impl GreptimeDbStandaloneBuilder {
}
pub async fn build(self) -> GreptimeDbStandalone {
let store_type = self.store_type.unwrap_or(StorageType::File);
let default_store_type = self.default_store.unwrap_or(StorageType::File);
let store_types = self.store_providers.unwrap_or_default();
let (opts, guard) = create_tmp_dir_and_datanode_opts(store_type, &self.instance_name);
let (opts, guard) =
create_tmp_dir_and_datanode_opts(default_store_type, store_types, &self.instance_name);
let procedure_config = ProcedureConfig::default();
let kv_backend_config = KvBackendConfig::default();


@@ -77,6 +77,26 @@ impl Display for StorageType {
}
impl StorageType {
pub fn build_storage_types_based_on_env() -> Vec<StorageType> {
let mut storage_types = Vec::with_capacity(4);
storage_types.push(StorageType::File);
if let Ok(bucket) = env::var("GT_S3_BUCKET") {
if !bucket.is_empty() {
storage_types.push(StorageType::S3);
}
}
if env::var("GT_OSS_BUCKET").is_ok() {
storage_types.push(StorageType::Oss);
}
if env::var("GT_AZBLOB_CONTAINER").is_ok() {
storage_types.push(StorageType::Azblob);
}
if env::var("GT_GCS_BUCKET").is_ok() {
storage_types.push(StorageType::Gcs);
}
storage_types
}
pub fn test_on(&self) -> bool {
let _ = dotenv::dotenv();
@@ -244,7 +264,7 @@ pub enum TempDirGuard {
pub struct TestGuard {
pub home_guard: FileDirGuard,
pub storage_guard: StorageGuard,
pub storage_guards: Vec<StorageGuard>,
}
pub struct FileDirGuard {
@@ -261,42 +281,62 @@ pub struct StorageGuard(pub TempDirGuard);
impl TestGuard {
pub async fn remove_all(&mut self) {
if let TempDirGuard::S3(guard)
| TempDirGuard::Oss(guard)
| TempDirGuard::Azblob(guard)
| TempDirGuard::Gcs(guard) = &mut self.storage_guard.0
{
guard.remove_all().await.unwrap()
for storage_guard in self.storage_guards.iter_mut() {
if let TempDirGuard::S3(guard)
| TempDirGuard::Oss(guard)
| TempDirGuard::Azblob(guard)
| TempDirGuard::Gcs(guard) = &mut storage_guard.0
{
guard.remove_all().await.unwrap()
}
}
}
}
pub fn create_tmp_dir_and_datanode_opts(
store_type: StorageType,
default_store_type: StorageType,
store_provider_types: Vec<StorageType>,
name: &str,
) -> (DatanodeOptions, TestGuard) {
let home_tmp_dir = create_temp_dir(&format!("gt_data_{name}"));
let home_dir = home_tmp_dir.path().to_str().unwrap().to_string();
let (store, data_tmp_dir) = get_test_store_config(&store_type);
let opts = create_datanode_opts(store, home_dir);
// Excludes the default object store.
let mut store_providers = Vec::with_capacity(store_provider_types.len());
// Includes the default object store.
let mut storage_guards = Vec::with_capacity(store_provider_types.len() + 1);
let (default_store, data_tmp_dir) = get_test_store_config(&default_store_type);
storage_guards.push(StorageGuard(data_tmp_dir));
for store_type in store_provider_types {
let (store, data_tmp_dir) = get_test_store_config(&store_type);
store_providers.push(store);
storage_guards.push(StorageGuard(data_tmp_dir))
}
let opts = create_datanode_opts(default_store, store_providers, home_dir);
(
opts,
TestGuard {
home_guard: FileDirGuard::new(home_tmp_dir),
storage_guard: StorageGuard(data_tmp_dir),
storage_guards,
},
)
}
pub(crate) fn create_datanode_opts(store: ObjectStoreConfig, home_dir: String) -> DatanodeOptions {
pub(crate) fn create_datanode_opts(
default_store: ObjectStoreConfig,
providers: Vec<ObjectStoreConfig>,
home_dir: String,
) -> DatanodeOptions {
DatanodeOptions {
node_id: Some(0),
require_lease_before_startup: true,
storage: StorageConfig {
data_home: home_dir,
store,
providers,
store: default_store,
..Default::default()
},
mode: Mode::Standalone,
@@ -325,7 +365,7 @@ async fn setup_standalone_instance(
store_type: StorageType,
) -> GreptimeDbStandalone {
GreptimeDbStandaloneBuilder::new(test_name)
.with_store_type(store_type)
.with_default_store_type(store_type)
.build()
.await
}
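`build_storage_types_based_on_env` decides which extra object stores an integration run exercises. A minimal free-standing sketch of the same selection; note the diff only guards against an empty value for `GT_S3_BUCKET`, while this sketch applies the emptiness check uniformly, a labeled simplification:

    use std::env;

    /// Which storage providers a test run would use; File is always included.
    fn providers_from_env() -> Vec<&'static str> {
        let checks = [
            ("GT_S3_BUCKET", "S3"),
            ("GT_OSS_BUCKET", "Oss"),
            ("GT_AZBLOB_CONTAINER", "Azblob"),
            ("GT_GCS_BUCKET", "Gcs"),
        ];
        let mut providers = vec!["File"];
        for (var, provider) in checks {
            if env::var(var).map(|v| !v.is_empty()).unwrap_or(false) {
                providers.push(provider);
            }
        }
        providers
    }

    fn main() {
        println!("storage types for this run: {:?}", providers_from_env());
    }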


@@ -31,8 +31,9 @@ use session::context::{QueryContext, QueryContextRef};
use crate::test_util::check_output_stream;
use crate::tests::test_util::{
both_instances_cases, check_unordered_output_stream, distributed, find_testing_resource,
prepare_path, standalone, standalone_instance_case, MockInstance,
both_instances_cases, both_instances_cases_with_custom_storages, check_unordered_output_stream,
distributed, distributed_with_multiple_object_stores, find_testing_resource, prepare_path,
standalone, standalone_instance_case, standalone_with_multiple_object_stores, MockInstance,
};
#[apply(both_instances_cases)]
@@ -1840,3 +1841,120 @@ async fn execute_sql_with(
.await
.unwrap()
}
#[apply(both_instances_cases_with_custom_storages)]
async fn test_custom_storage(instance: Arc<dyn MockInstance>) {
let frontend = instance.frontend();
let custom_storages = [
("S3", "GT_S3_BUCKET"),
("Oss", "GT_OSS_BUCKET"),
("Azblob", "GT_AZBLOB_CONTAINER"),
("Gcs", "GT_GCS_BUCKET"),
];
for (storage_name, custom_storage_env) in custom_storages {
if let Ok(env_value) = env::var(custom_storage_env) {
if env_value.is_empty() {
continue;
}
let sql = if instance.is_distributed_mode() {
format!(
r#"create table test_table(
a int null primary key,
ts timestamp time index,
)
PARTITION BY RANGE COLUMNS (a) (
PARTITION r0 VALUES LESS THAN (1),
PARTITION r1 VALUES LESS THAN (10),
PARTITION r2 VALUES LESS THAN (100),
PARTITION r3 VALUES LESS THAN (MAXVALUE),
)
with(storage='{storage_name}')
"#
)
} else {
format!(
r#"create table test_table(a int primary key, ts timestamp time index)with(storage='{storage_name}');"#
)
};
let output = execute_sql(&instance.frontend(), &sql).await;
assert!(matches!(output, Output::AffectedRows(0)));
let output = execute_sql(
&frontend,
r#"insert into test_table(a, ts) values
(1, 1655276557000),
(1000, 1655276558000)
"#,
)
.await;
assert!(matches!(output, Output::AffectedRows(2)));
let output = execute_sql(&frontend, "select * from test_table").await;
let expected = "\
+------+---------------------+
| a | ts |
+------+---------------------+
| 1 | 2022-06-15T07:02:37 |
| 1000 | 2022-06-15T07:02:38 |
+------+---------------------+";
check_output_stream(output, expected).await;
let output = execute_sql(&frontend, "show create table test_table").await;
let Output::RecordBatches(record_batches) = output else {
unreachable!()
};
let record_batches = record_batches.iter().collect::<Vec<_>>();
let column = record_batches[0].column_by_name("Create Table").unwrap();
let actual = column.get(0);
let expect = if instance.is_distributed_mode() {
format!(
r#"CREATE TABLE IF NOT EXISTS "test_table" (
"a" INT NULL,
"ts" TIMESTAMP(3) NOT NULL,
TIME INDEX ("ts"),
PRIMARY KEY ("a")
)
PARTITION BY RANGE COLUMNS ("a") (
PARTITION r0 VALUES LESS THAN (1),
PARTITION r1 VALUES LESS THAN (10),
PARTITION r2 VALUES LESS THAN (100),
PARTITION r3 VALUES LESS THAN (MAXVALUE)
)
ENGINE=mito
WITH(
regions = 4,
storage = '{storage_name}'
)"#
)
} else {
format!(
r#"CREATE TABLE IF NOT EXISTS "test_table" (
"a" INT NULL,
"ts" TIMESTAMP(3) NOT NULL,
TIME INDEX ("ts"),
PRIMARY KEY ("a")
)
ENGINE=mito
WITH(
regions = 1,
storage = '{storage_name}'
)"#
)
};
assert_eq!(actual.to_string(), expect);
let output = execute_sql(&frontend, "truncate test_table").await;
assert!(matches!(output, Output::AffectedRows(0)));
let output = execute_sql(&frontend, "select * from test_table").await;
let expected = "\
++
++";
check_output_stream(output, expected).await;
let output = execute_sql(&frontend, "drop table test_table").await;
assert!(matches!(output, Output::AffectedRows(0)));
}
}
}


@@ -20,7 +20,9 @@ use common_test_util::find_workspace_path;
use frontend::instance::Instance;
use rstest_reuse::{self, template};
use crate::cluster::GreptimeDbClusterBuilder;
use crate::standalone::{GreptimeDbStandalone, GreptimeDbStandaloneBuilder};
use crate::test_util::StorageType;
use crate::tests::{create_distributed_instance, MockDistributedInstance};
pub(crate) trait MockInstance {
@@ -61,6 +63,42 @@ pub(crate) async fn distributed() -> Arc<dyn MockInstance> {
Arc::new(instance)
}
pub(crate) async fn standalone_with_multiple_object_stores() -> Arc<dyn MockInstance> {
let _ = dotenv::dotenv();
let test_name = uuid::Uuid::new_v4().to_string();
let storage_types = StorageType::build_storage_types_based_on_env();
let instance = GreptimeDbStandaloneBuilder::new(&test_name)
.with_store_providers(storage_types)
.build()
.await;
Arc::new(instance)
}
pub(crate) async fn distributed_with_multiple_object_stores() -> Arc<dyn MockInstance> {
let _ = dotenv::dotenv();
let test_name = uuid::Uuid::new_v4().to_string();
let providers = StorageType::build_storage_types_based_on_env();
let cluster = GreptimeDbClusterBuilder::new(&test_name)
.await
.with_store_providers(providers)
.build()
.await;
Arc::new(MockDistributedInstance(cluster))
}
#[template]
#[rstest]
#[case::test_with_standalone(standalone_with_multiple_object_stores())]
#[case::test_with_distributed(distributed_with_multiple_object_stores())]
#[awt]
#[tokio::test(flavor = "multi_thread")]
pub(crate) fn both_instances_cases_with_custom_storages(
#[future]
#[case]
instance: Arc<dyn MockInstance>,
) {
}
#[template]
#[rstest]
#[case::test_with_standalone(standalone())]


@@ -712,6 +712,7 @@ sync_write = false
[datanode.storage]
type = "{}"
providers = []
[[datanode.region_engine]]


@@ -0,0 +1,164 @@
SELECT date_add('2023-12-06 07:39:46.222'::TIMESTAMP_MS, INTERVAL '5 day');
+----------------------------------------------------------------------------------------+
| date_add(Utf8("2023-12-06 07:39:46.222"),IntervalMonthDayNano("92233720368547758080")) |
+----------------------------------------------------------------------------------------+
| 2023-12-11T07:39:46.222 |
+----------------------------------------------------------------------------------------+
SELECT date_add('2023-12-06 07:39:46.222'::TIMESTAMP_MS, '5 day');
+---------------------------------------------------------+
| date_add(Utf8("2023-12-06 07:39:46.222"),Utf8("5 day")) |
+---------------------------------------------------------+
| 2023-12-11T07:39:46.222 |
+---------------------------------------------------------+
SELECT date_add('2023-12-06'::DATE, INTERVAL '3 month 5 day');
+-------------------------------------------------------------------------------------+
| date_add(Utf8("2023-12-06"),IntervalMonthDayNano("237684487635026733149179609088")) |
+-------------------------------------------------------------------------------------+
| 2024-03-11 |
+-------------------------------------------------------------------------------------+
SELECT date_add('2023-12-06'::DATE, '3 month 5 day');
+----------------------------------------------------+
| date_add(Utf8("2023-12-06"),Utf8("3 month 5 day")) |
+----------------------------------------------------+
| 2024-03-11 |
+----------------------------------------------------+
SELECT date_sub('2023-12-06 07:39:46.222'::TIMESTAMP_MS, INTERVAL '5 day');
+----------------------------------------------------------------------------------------+
| date_sub(Utf8("2023-12-06 07:39:46.222"),IntervalMonthDayNano("92233720368547758080")) |
+----------------------------------------------------------------------------------------+
| 2023-12-01T07:39:46.222 |
+----------------------------------------------------------------------------------------+
SELECT date_sub('2023-12-06 07:39:46.222'::TIMESTAMP_MS, '5 day');
+---------------------------------------------------------+
| date_sub(Utf8("2023-12-06 07:39:46.222"),Utf8("5 day")) |
+---------------------------------------------------------+
| 2023-12-01T07:39:46.222 |
+---------------------------------------------------------+
SELECT date_sub('2023-12-06'::DATE, INTERVAL '3 month 5 day');
+-------------------------------------------------------------------------------------+
| date_sub(Utf8("2023-12-06"),IntervalMonthDayNano("237684487635026733149179609088")) |
+-------------------------------------------------------------------------------------+
| 2023-09-01 |
+-------------------------------------------------------------------------------------+
SELECT date_sub('2023-12-06'::DATE, '3 month 5 day');
+----------------------------------------------------+
| date_sub(Utf8("2023-12-06"),Utf8("3 month 5 day")) |
+----------------------------------------------------+
| 2023-09-01 |
+----------------------------------------------------+
CREATE TABLE dates(d DATE, ts timestamp time index);
Affected Rows: 0
INSERT INTO dates VALUES ('1992-01-01'::DATE, 1);
Affected Rows: 1
INSERT INTO dates VALUES ('1993-12-30'::DATE, 2);
Affected Rows: 1
INSERT INTO dates VALUES ('2023-12-06'::DATE, 3);
Affected Rows: 1
SELECT date_add(d, INTERVAL '1 year 2 month 3 day') from dates;
+---------------------------------------------------------------------------+
| date_add(dates.d,IntervalMonthDayNano("1109194275255040958530743959552")) |
+---------------------------------------------------------------------------+
| 1993-03-04 |
| 1995-03-03 |
| 2025-02-09 |
+---------------------------------------------------------------------------+
SELECT date_add(d, '1 year 2 month 3 day') from dates;
+------------------------------------------------+
| date_add(dates.d,Utf8("1 year 2 month 3 day")) |
+------------------------------------------------+
| 1993-03-04 |
| 1995-03-03 |
| 2025-02-09 |
+------------------------------------------------+
SELECT date_add(ts, INTERVAL '1 year 2 month 3 day') from dates;
+----------------------------------------------------------------------------+
| date_add(dates.ts,IntervalMonthDayNano("1109194275255040958530743959552")) |
+----------------------------------------------------------------------------+
| 1971-03-04T00:00:00.001 |
| 1971-03-04T00:00:00.002 |
| 1971-03-04T00:00:00.003 |
+----------------------------------------------------------------------------+
SELECT date_add(ts, '1 year 2 month 3 day') from dates;
+-------------------------------------------------+
| date_add(dates.ts,Utf8("1 year 2 month 3 day")) |
+-------------------------------------------------+
| 1971-03-04T00:00:00.001 |
| 1971-03-04T00:00:00.002 |
| 1971-03-04T00:00:00.003 |
+-------------------------------------------------+
SELECT date_sub(d, INTERVAL '1 year 2 month 3 day') from dates;
+---------------------------------------------------------------------------+
| date_sub(dates.d,IntervalMonthDayNano("1109194275255040958530743959552")) |
+---------------------------------------------------------------------------+
| 1990-10-29 |
| 1992-10-27 |
| 2022-10-03 |
+---------------------------------------------------------------------------+
SELECT date_sub(d, '1 year 2 month 3 day') from dates;
+------------------------------------------------+
| date_sub(dates.d,Utf8("1 year 2 month 3 day")) |
+------------------------------------------------+
| 1990-10-29 |
| 1992-10-27 |
| 2022-10-03 |
+------------------------------------------------+
SELECT date_sub(ts, INTERVAL '1 year 2 month 3 day') from dates;
+----------------------------------------------------------------------------+
| date_sub(dates.ts,IntervalMonthDayNano("1109194275255040958530743959552")) |
+----------------------------------------------------------------------------+
| 1968-10-29T00:00:00.001 |
| 1968-10-29T00:00:00.002 |
| 1968-10-29T00:00:00.003 |
+----------------------------------------------------------------------------+
SELECT date_sub(ts, '1 year 2 month 3 day') from dates;
+-------------------------------------------------+
| date_sub(dates.ts,Utf8("1 year 2 month 3 day")) |
+-------------------------------------------------+
| 1968-10-29T00:00:00.001 |
| 1968-10-29T00:00:00.002 |
| 1968-10-29T00:00:00.003 |
+-------------------------------------------------+
DROP TABLE dates;
Affected Rows: 0


@@ -0,0 +1,42 @@
SELECT date_add('2023-12-06 07:39:46.222'::TIMESTAMP_MS, INTERVAL '5 day');
SELECT date_add('2023-12-06 07:39:46.222'::TIMESTAMP_MS, '5 day');
SELECT date_add('2023-12-06'::DATE, INTERVAL '3 month 5 day');
SELECT date_add('2023-12-06'::DATE, '3 month 5 day');
SELECT date_sub('2023-12-06 07:39:46.222'::TIMESTAMP_MS, INTERVAL '5 day');
SELECT date_sub('2023-12-06 07:39:46.222'::TIMESTAMP_MS, '5 day');
SELECT date_sub('2023-12-06'::DATE, INTERVAL '3 month 5 day');
SELECT date_sub('2023-12-06'::DATE, '3 month 5 day');
CREATE TABLE dates(d DATE, ts timestamp time index);
INSERT INTO dates VALUES ('1992-01-01'::DATE, 1);
INSERT INTO dates VALUES ('1993-12-30'::DATE, 2);
INSERT INTO dates VALUES ('2023-12-06'::DATE, 3);
SELECT date_add(d, INTERVAL '1 year 2 month 3 day') from dates;
SELECT date_add(d, '1 year 2 month 3 day') from dates;
SELECT date_add(ts, INTERVAL '1 year 2 month 3 day') from dates;
SELECT date_add(ts, '1 year 2 month 3 day') from dates;
SELECT date_sub(d, INTERVAL '1 year 2 month 3 day') from dates;
SELECT date_sub(d, '1 year 2 month 3 day') from dates;
SELECT date_sub(ts, INTERVAL '1 year 2 month 3 day') from dates;
SELECT date_sub(ts, '1 year 2 month 3 day') from dates;
DROP TABLE dates;


@@ -100,3 +100,24 @@ WITH(
Error: 1004(InvalidArguments), Invalid table option key: foo
CREATE TABLE not_supported_table_storage_option (
id INT UNSIGNED,
host STRING,
cpu DOUBLE,
disk FLOAT,
ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
TIME INDEX (ts),
PRIMARY KEY (id, host)
)
PARTITION BY RANGE COLUMNS (id) (
PARTITION r0 VALUES LESS THAN (5),
PARTITION r1 VALUES LESS THAN (9),
PARTITION r2 VALUES LESS THAN (MAXVALUE),
)
ENGINE=mito
WITH(
storage = 'S3'
);
Error: 1004(InvalidArguments), Object store not found: s3


@@ -50,3 +50,21 @@ WITH(
ttl = '7d',
write_buffer_size = 1024
);
CREATE TABLE not_supported_table_storage_option (
id INT UNSIGNED,
host STRING,
cpu DOUBLE,
disk FLOAT,
ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
TIME INDEX (ts),
PRIMARY KEY (id, host)
)
PARTITION BY RANGE COLUMNS (id) (
PARTITION r0 VALUES LESS THAN (5),
PARTITION r1 VALUES LESS THAN (9),
PARTITION r2 VALUES LESS THAN (MAXVALUE),
)
ENGINE=mito
WITH(
storage = 'S3'
);