Compare commits

...

13 Commits

Author SHA1 Message Date
Ruihang Xia
710a68d2d6 chore: add deprecate develop branch warning
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-12-28 14:31:38 +08:00
Weny Xu
485a91f49a feat: implement handle upgrade region instruction (#3013)
* feat: implement task tracker

* feat: implement handle upgrade region instruction

* refactor: remove redundant code

* chore: apply suggestions from CR

* chore: apply suggestions from CR

* refactor: refactor wait_for_replay_millis to wait_for_replay_timeout

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2023-12-28 02:08:47 +00:00
Ruihang Xia
bd0eed7af9 chore: do not send message for xlarge PR (#3020)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-12-27 13:22:19 +00:00
zyy17
b8b1e98399 refactor: use string type instead of Option type for '--store-key-prefix' (#3018)
* refactor: use string type instead of Option type for '--store-key-prefix'

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* chore: refine for code review comments

---------

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2023-12-27 11:26:30 +00:00
Weny Xu
abeb32e042 feat: implement region migration manager (#3014)
* feat: implement region migration manager

* Update src/meta-srv/src/procedure/region_migration/manager.rs

Co-authored-by: JeremyHi <jiachun_feng@proton.me>

* chore: apply suggestions from CR

---------

Co-authored-by: JeremyHi <jiachun_feng@proton.me>
2023-12-27 10:50:10 +00:00
Weny Xu
840e94630d feat: implement param parsing of SubmitRegionMigrationTaskHandler (#3015)
* feat: implement param parsing of `SubmitMigrationTaskHandler`

* chore: apply suggestions from CR

* refactor: change `SubmitRegionMigrationTaskParams` to `SubmitRegionMigrationTaskRequest`
2023-12-27 10:08:54 +00:00
Wei
43e3a77263 fix: decimal128 ScalarValue to Value (#3019)
fix: decimal128 scalarvalue to value
2023-12-27 10:03:37 +00:00
WU Jingdi
d1ee1ba56a feat: support set timezone in db (#2992)
* feat: support set timezone in db

* chore: fix  ci

* chore: fix code advice

* fix: rename `time_zone` to `timezone`
2023-12-27 09:19:39 +00:00
Ning Sun
feec4e289d feat: upgrade pgwire to 0.18 for corrected statement caching (#3010) 2023-12-27 03:02:25 +00:00
Ruihang Xia
718447c542 docs: RFC of enclosing column id (#2983)
* docs: RFC of enclosing column id

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update docs/rfcs/2023-12-22-enclose-column-id.md

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

* accomplish the first point

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Weny Xu <wenymedia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-12-27 02:54:53 +00:00
LFC
eadde72973 chore: "fix: revert unfinished route table change" (#3009)
Revert "fix: revert unfinished route table change (#3008)"

This reverts commit 8ce8a8f3c7.
2023-12-27 02:40:59 +00:00
Ning Sun
7c5c75568d chore: try to fix size labeller (#3012) 2023-12-26 21:36:44 +08:00
Ruihang Xia
1c9bf2e2a7 fix: change CI target repo to the origin one (#3011)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-12-26 21:15:44 +08:00
95 changed files with 3023 additions and 1232 deletions

View File

@@ -1,6 +1,6 @@
name: "PR Doc Labeler"
on:
pull_request:
pull_request_target:
types: [opened, edited, synchronize, ready_for_review, auto_merge_enabled, labeled, unlabeled]
permissions:

View File

@@ -1,11 +1,14 @@
name: size-labeler
on: [pull_request]
on: [pull_request_target]
jobs:
labeler:
runs-on: ubuntu-latest
name: Label the PR size
permissions:
issues: write
pull-requests: write
steps:
- uses: codelytv/pr-size-labeler@v1
with:
@@ -18,9 +21,5 @@ jobs:
l_max_size: '1000'
xl_label: 'Size: XL'
fail_if_xl: 'false'
message_if_xl: >
This PR exceeds the recommended size of 1000 lines.
Please make sure you are NOT addressing multiple issues with one PR.
Note this PR might be rejected due to its size.
github_api_url: 'api.github.com'
message_if_xl: ""
files_to_ignore: 'Cargo.lock'

Cargo.lock (generated, 722 changes): file diff suppressed because it is too large.

View File

@@ -27,6 +27,9 @@
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
</p>
> [!WARNING]
> Our default branch has changed from `develop` to `main` (issue [#3025](https://github.com/GreptimeTeam/greptimedb/issues/3025)). Please update your local repository to use the `main` branch.
## What is GreptimeDB
GreptimeDB is an open-source time-series database with a special focus on

View File

@@ -1,5 +1,7 @@
# Node running mode, see `standalone.example.toml`.
mode = "distributed"
# The default timezone of the server
# default_timezone = "UTC"
[heartbeat]
# Interval for sending heartbeat task to the Metasrv, 5 seconds by default.

View File

@@ -15,6 +15,8 @@ selector = "lease_based"
use_memory_store = false
# Whether to enable greptimedb telemetry, true by default.
enable_telemetry = true
# If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
# Log options, see `standalone.example.toml`
# [logging]

View File

@@ -2,6 +2,8 @@
mode = "standalone"
# Whether to enable greptimedb telemetry, true by default.
enable_telemetry = true
# The default timezone of the server
# default_timezone = "UTC"
# HTTP server options.
[http]

View File

@@ -0,0 +1,44 @@
---
Feature Name: Enclose Column Id
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/2982
Date: 2023-12-22
Author: "Ruihang Xia <waynestxia@gmail.com>"
---
# Summary
This RFC proposes to enclose the usage of `ColumnId` within the region engine only.
# Motivation
`ColumnId` is an identifier for columns. It's assigned by the meta server, stored in `TableInfo` and `RegionMetadata`, and used in the region engine to distinguish columns.
At present, Frontend, Datanode, and Metasrv are all aware of `ColumnId`, but it's only used in the region engine. Thus this RFC proposes to remove it from Frontend (where it's mainly used in `TableInfo`) and Metasrv.
# Details
`ColumnId` is used widely on both read and write paths. Removing it from Frontend and Metasrv implies several things:
- A column may have different column ids in different regions.
- A column is identified by its name in all components.
- Column order in the region engine is not restricted, i.e., it doesn't need to match the order in the table info.
The first point doesn't matter IMO. This concept no longer exists outside of the region server, and each region is autonomous and independent -- the only guarantee it should hold is that those columns exist. But if we consider region repartition, where SST files would be re-assigned to different regions, things become a bit more complicated. A possible solution is to store the relation between name and ColumnId in the manifest, but that is out of the scope of this RFC. We can likely work around it by introducing an indirection mapping layer across different versions of partitions.
More importantly, we can still assume columns have the same column ids across regions. We have procedures to maintain consistency between regions, and the region engine should ensure alterations are idempotent. So it is possible that region repartition won't need to consider column ids or other region metadata in the future.
Users write and query columns by their names, not by ColumnId or anything else. The second point also means changing the column reference in ScanRequest from index to name, as sketched below. This change can greatly reduce misuse of the column index, which has given us many surprises.
As for the last point, column order only matters in the table info. This order is used in user-facing table structure operations, like adding a column, describing a column, or as the default column order of an INSERT clause. None of them is connected with the order in storage.
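To make the second point concrete, here is a minimal sketch of the index-to-name switch. The struct and field names are hypothetical, for illustration only, and are not the actual `ScanRequest` definition in the codebase.

```rust
// Hypothetical sketch only; these types are illustrative, not real GreptimeDB APIs.
#[allow(dead_code)]
struct ScanRequestByIndex {
    /// Before: columns are referenced by their index into the table schema,
    /// which forces the region schema to keep exactly the same column order.
    projection: Option<Vec<usize>>,
}

#[allow(dead_code)]
struct ScanRequestByName {
    /// After: columns are referenced by name and resolved inside the region
    /// engine, so the region is free to order columns and assign ids itself.
    projection: Option<Vec<String>>,
}

fn main() {
    // The caller no longer needs to know how the region engine ordered its columns.
    let _old = ScanRequestByIndex { projection: Some(vec![0, 2]) };
    let _new = ScanRequestByName {
        projection: Some(vec!["host".to_string(), "cpu_usage".to_string()]),
    };
}
```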
# Drawback
Firstly, this is a breaking change. Delivering this change requires a full upgrade of the cluster. Secondly, this change may introduce some performance regression. For example, we have to pass the full column name in the `ScanRequest` instead of the `ColumnId`. But this influence is very limited, since the column index is only used in the region engine.
# Alternatives
There are two alternatives from the perspective of "what can be used as the column identifier":
- Index of the column in the table schema
- `ColumnId` of that column
The first one is what we are using now. Choosing it requires keeping the column order in the region engine the same as in the table info. This is not hard to achieve, but it's a bit annoying. And things become tricky when there are internal columns or different schemas, like those stored in file format (see the sketch below). This is the initial purpose of this RFC: to decouple the table schema and the region schema.
The second one, on the other hand, requires the `ColumnId` to be identical across all regions and the `TableInfo`. It has the same drawback as the previous alternative: the `TableInfo` and `RegionMetadata` are tied together. Another point is that the `ColumnId` is assigned by the Metasrv, which doesn't need it but has to maintain it. This also limits the functionality of `ColumnId` by taking the ability to assign it away from the concrete region engine.
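A small illustration of why index-based identification gets tricky once a region carries an internal column that the table schema doesn't know about. The column names are made up for this sketch; it assumes nothing about the real schemas.

```rust
// Illustrative only; simplified "schemas" as plain name lists, not real GreptimeDB types.
const TABLE_COLUMNS: [&str; 3] = ["ts", "host", "cpu_usage"];
// The region holds an extra internal column, so positions no longer line up.
const REGION_COLUMNS: [&str; 4] = ["ts", "host", "__internal", "cpu_usage"];

fn main() {
    // By index: position 2 means "cpu_usage" against the table schema...
    assert_eq!(TABLE_COLUMNS[2], "cpu_usage");
    // ...but resolves to the internal column against the region schema.
    assert_eq!(REGION_COLUMNS[2], "__internal");

    // By name: the lookup is unaffected by ordering or internal columns.
    let by_name = REGION_COLUMNS.iter().position(|c| *c == "cpu_usage");
    assert_eq!(by_name, Some(3));
}
```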

View File

@@ -32,6 +32,7 @@ common-recordbatch.workspace = true
common-telemetry = { workspace = true, features = [
"deadlock_detection",
] }
common-time.workspace = true
config = "0.13"
datanode.workspace = true
datatypes.workspace = true

View File

@@ -14,6 +14,7 @@
use std::time::Instant;
use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManagerRef;
use common_meta::table_name::TableName;
@@ -53,7 +54,11 @@ impl TableMetadataBencher {
let start = Instant::now();
self.table_metadata_manager
.create_table_metadata(table_info, region_routes, region_wal_options)
.create_table_metadata(
table_info,
TableRouteValue::physical(region_routes),
region_wal_options,
)
.await
.unwrap();

View File

@@ -27,7 +27,7 @@ use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
use common_meta::key::table_name::{TableNameKey, TableNameValue};
use common_meta::key::table_region::{TableRegionKey, TableRegionValue};
use common_meta::key::table_route::{TableRouteKey, TableRouteValue as NextTableRouteValue};
use common_meta::key::{RegionDistribution, TableMetaKey};
use common_meta::key::{RegionDistribution, TableMetaKey, TableMetaValue};
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::KvBackendRef;
use common_meta::range_stream::PaginationStream;
@@ -153,7 +153,7 @@ impl MigrateTableMetadata {
)
.unwrap();
let new_table_value = NextTableRouteValue::new(table_route.region_routes);
let new_table_value = NextTableRouteValue::physical(table_route.region_routes);
let table_id = table_route.table.id as u32;
let new_key = TableRouteKey::new(table_id);

View File

@@ -43,6 +43,12 @@ pub enum Error {
source: common_meta::error::Error,
},
#[snafu(display("Failed to init default timezone"))]
InitTimezone {
location: Location,
source: common_time::error::Error,
},
#[snafu(display("Failed to start procedure manager"))]
StartProcedureManager {
location: Location,
@@ -268,6 +274,7 @@ impl ErrorExt for Error {
| Error::LoadLayeredConfig { .. }
| Error::IllegalConfig { .. }
| Error::InvalidReplCommand { .. }
| Error::InitTimezone { .. }
| Error::ConnectEtcd { .. }
| Error::NotDataFromOutput { .. }
| Error::CreateDir { .. }

View File

@@ -22,6 +22,7 @@ use client::client_manager::DatanodeClients;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_telemetry::logging;
use common_time::timezone::set_default_timezone;
use frontend::frontend::FrontendOptions;
use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
use frontend::heartbeat::HeartbeatTask;
@@ -32,7 +33,7 @@ use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::{OptionExt, ResultExt};
use crate::error::{self, MissingConfigSnafu, Result, StartFrontendSnafu};
use crate::error::{self, InitTimezoneSnafu, MissingConfigSnafu, Result, StartFrontendSnafu};
use crate::options::{CliOptions, Options};
use crate::App;
@@ -217,6 +218,8 @@ impl StartCommand {
logging::info!("Frontend start command: {:#?}", self);
logging::info!("Frontend options: {:#?}", opts);
set_default_timezone(opts.default_timezone.as_deref()).context(InitTimezoneSnafu)?;
let meta_client_options = opts.meta_client.as_ref().context(MissingConfigSnafu {
msg: "'meta_client'",
})?;

View File

@@ -119,8 +119,8 @@ struct StartCommand {
data_home: Option<String>,
/// If it's not empty, the metasrv will store all data with this key prefix.
#[clap(long)]
store_key_prefix: Option<String>,
#[clap(long, default_value = "")]
store_key_prefix: String,
}
impl StartCommand {
@@ -177,7 +177,9 @@ impl StartCommand {
opts.data_home = data_home.clone();
}
opts.store_key_prefix = self.store_key_prefix.clone();
if !self.store_key_prefix.is_empty() {
opts.store_key_prefix = self.store_key_prefix.clone()
}
// Disable dashboard in metasrv.
opts.http.disable_dashboard = true;

View File

@@ -32,6 +32,7 @@ use common_meta::wal::{WalOptionsAllocator, WalOptionsAllocatorRef};
use common_procedure::ProcedureManagerRef;
use common_telemetry::info;
use common_telemetry::logging::LoggingOptions;
use common_time::timezone::set_default_timezone;
use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
use datanode::datanode::{Datanode, DatanodeBuilder};
use file_engine::config::EngineConfig as FileEngineConfig;
@@ -51,8 +52,8 @@ use servers::Mode;
use snafu::ResultExt;
use crate::error::{
CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu, InitMetadataSnafu, Result,
ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu, InitMetadataSnafu, InitTimezoneSnafu,
Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
StartProcedureManagerSnafu, StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
};
use crate::options::{CliOptions, MixOptions, Options};
@@ -98,6 +99,7 @@ impl SubCommand {
pub struct StandaloneOptions {
pub mode: Mode,
pub enable_telemetry: bool,
pub default_timezone: Option<String>,
pub http: HttpOptions,
pub grpc: GrpcOptions,
pub mysql: MysqlOptions,
@@ -121,6 +123,7 @@ impl Default for StandaloneOptions {
Self {
mode: Mode::Standalone,
enable_telemetry: true,
default_timezone: None,
http: HttpOptions::default(),
grpc: GrpcOptions::default(),
mysql: MysqlOptions::default(),
@@ -147,6 +150,7 @@ impl StandaloneOptions {
fn frontend_options(self) -> FrontendOptions {
FrontendOptions {
mode: self.mode,
default_timezone: self.default_timezone,
http: self.http,
grpc: self.grpc,
mysql: self.mysql,
@@ -369,6 +373,9 @@ impl StartCommand {
info!("Building standalone instance with {opts:#?}");
set_default_timezone(opts.frontend.default_timezone.as_deref())
.context(InitTimezoneSnafu)?;
// Ensure the data_home directory exists.
fs::create_dir_all(path::Path::new(&opts.data_home)).context(CreateDirSnafu {
dir: &opts.data_home,

View File

@@ -21,10 +21,10 @@ use store_api::storage::{RegionNumber, TableId};
use crate::cache_invalidator::CacheInvalidatorRef;
use crate::datanode_manager::DatanodeManagerRef;
use crate::error::Result;
use crate::key::table_route::TableRouteValue;
use crate::key::TableMetadataManagerRef;
use crate::region_keeper::MemoryRegionKeeperRef;
use crate::rpc::ddl::{CreateTableTask, SubmitDdlTaskRequest, SubmitDdlTaskResponse};
use crate::rpc::router::RegionRoute;
pub mod alter_table;
pub mod create_table;
@@ -58,7 +58,7 @@ pub struct TableMetadata {
/// Table id.
pub table_id: TableId,
/// Route information for each region of the table.
pub region_routes: Vec<RegionRoute>,
pub table_route: TableRouteValue,
/// The encoded wal options for regions of the table.
// If a region does not have an associated wal options, no key for the region would be found in the map.
pub region_wal_options: HashMap<RegionNumber, String>,

View File

@@ -45,7 +45,6 @@ use crate::error::{
};
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::key::DeserializedValueWithBytes;
use crate::metrics;
use crate::rpc::ddl::AlterTableTask;
@@ -183,25 +182,23 @@ impl AlterTableProcedure {
pub async fn submit_alter_region_requests(&mut self) -> Result<Status> {
let table_id = self.data.table_id();
let table_ref = self.data.table_ref();
let TableRouteValue { region_routes, .. } = self
let table_route = self
.context
.table_metadata_manager
.table_route_manager()
.get(table_id)
.await?
.with_context(|| TableRouteNotFoundSnafu {
table_name: table_ref.to_string(),
})?
.context(TableRouteNotFoundSnafu { table_id })?
.into_inner();
let region_routes = table_route.region_routes();
let leaders = find_leaders(&region_routes);
let leaders = find_leaders(region_routes);
let mut alter_region_tasks = Vec::with_capacity(leaders.len());
for datanode in leaders {
let requester = self.context.datanode_manager.datanode(&datanode).await;
let regions = find_leader_regions(&region_routes, &datanode);
let regions = find_leader_regions(region_routes, &datanode);
for region in regions {
let region_id = RegionId::new(table_id, region);

View File

@@ -18,9 +18,8 @@ use api::v1::region::region_request::Body as PbRegionRequest;
use api::v1::region::{
CreateRequest as PbCreateRegionRequest, RegionColumnDef, RegionRequest, RegionRequestHeader,
};
use api::v1::{ColumnDef, CreateTableExpr, SemanticType};
use api::v1::{ColumnDef, SemanticType};
use async_trait::async_trait;
use common_catalog::consts::METRIC_ENGINE;
use common_config::WAL_OPTIONS_KEY;
use common_error::ext::BoxedError;
use common_procedure::error::{
@@ -40,8 +39,9 @@ use table::metadata::{RawTableInfo, TableId};
use crate::ddl::utils::{handle_operate_region_error, handle_retry_error, region_storage_path};
use crate::ddl::DdlContext;
use crate::error::{self, Result, TableInfoNotFoundSnafu};
use crate::error::{self, Result, TableRouteNotFoundSnafu};
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::metrics;
use crate::region_keeper::OperatingRegionGuard;
use crate::rpc::ddl::CreateTableTask;
@@ -60,13 +60,13 @@ impl CreateTableProcedure {
pub fn new(
cluster_id: u64,
task: CreateTableTask,
region_routes: Vec<RegionRoute>,
table_route: TableRouteValue,
region_wal_options: HashMap<RegionNumber, String>,
context: DdlContext,
) -> Self {
Self {
context,
creator: TableCreator::new(cluster_id, task, region_routes, region_wal_options),
creator: TableCreator::new(cluster_id, task, table_route, region_wal_options),
}
}
@@ -78,10 +78,12 @@ impl CreateTableProcedure {
opening_regions: vec![],
};
creator
.register_opening_regions(&context)
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
if let TableRouteValue::Physical(x) = &creator.data.table_route {
creator.opening_regions = creator
.register_opening_regions(&context, &x.region_routes)
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
}
Ok(CreateTableProcedure { context, creator })
}
@@ -94,10 +96,6 @@ impl CreateTableProcedure {
self.table_info().ident.table_id
}
pub fn region_routes(&self) -> &Vec<RegionRoute> {
&self.creator.data.region_routes
}
pub fn region_wal_options(&self) -> &HashMap<RegionNumber, String> {
&self.creator.data.region_wal_options
}
@@ -132,7 +130,10 @@ impl CreateTableProcedure {
Ok(Status::executing(true))
}
pub fn new_region_request_builder(&self) -> Result<CreateRequestBuilder> {
pub fn new_region_request_builder(
&self,
physical_table_id: Option<TableId>,
) -> Result<CreateRequestBuilder> {
let create_table_expr = &self.creator.data.task.create_table;
let column_defs = create_table_expr
@@ -191,16 +192,54 @@ impl CreateTableProcedure {
options: create_table_expr.table_options.clone(),
};
let builder = CreateRequestBuilder::new_template(self.context.clone(), template);
Ok(builder)
Ok(CreateRequestBuilder {
template,
physical_table_id,
})
}
pub async fn on_datanode_create_regions(&mut self) -> Result<Status> {
match &self.creator.data.table_route {
TableRouteValue::Physical(x) => {
let region_routes = x.region_routes.clone();
let request_builder = self.new_region_request_builder(None)?;
self.create_regions(&region_routes, request_builder).await
}
TableRouteValue::Logical(x) => {
let physical_table_id = x.physical_table_id();
let physical_table_route = self
.context
.table_metadata_manager
.table_route_manager()
.get(physical_table_id)
.await?
.context(TableRouteNotFoundSnafu {
table_id: physical_table_id,
})?;
let region_routes = physical_table_route.region_routes();
let request_builder = self.new_region_request_builder(Some(physical_table_id))?;
self.create_regions(region_routes, request_builder).await
}
}
}
async fn create_regions(
&mut self,
region_routes: &[RegionRoute],
request_builder: CreateRequestBuilder,
) -> Result<Status> {
// Registers opening regions
self.creator.register_opening_regions(&self.context)?;
let guards = self
.creator
.register_opening_regions(&self.context, region_routes)?;
if !guards.is_empty() {
self.creator.opening_regions = guards;
}
let create_table_data = &self.creator.data;
let region_routes = &create_table_data.region_routes;
let region_wal_options = &create_table_data.region_wal_options;
let create_table_expr = &create_table_data.task.create_table;
@@ -208,8 +247,6 @@ impl CreateTableProcedure {
let schema = &create_table_expr.schema_name;
let storage_path = region_storage_path(catalog, schema);
let mut request_builder = self.new_region_request_builder()?;
let leaders = find_leaders(region_routes);
let mut create_region_tasks = Vec::with_capacity(leaders.len());
@@ -221,12 +258,7 @@ impl CreateTableProcedure {
for region_number in regions {
let region_id = RegionId::new(self.table_id(), region_number);
let create_region_request = request_builder
.build_one(
&self.creator.data.task.create_table,
region_id,
storage_path.clone(),
region_wal_options,
)
.build_one(region_id, storage_path.clone(), region_wal_options)
.await?;
requests.push(PbRegionRequest::Create(create_region_request));
@@ -270,10 +302,13 @@ impl CreateTableProcedure {
let manager = &self.context.table_metadata_manager;
let raw_table_info = self.table_info().clone();
let region_routes = self.region_routes().clone();
let region_wal_options = self.region_wal_options().clone();
manager
.create_table_metadata(raw_table_info, region_routes, region_wal_options)
.create_table_metadata(
raw_table_info,
self.creator.data.table_route.clone(),
region_wal_options,
)
.await?;
info!("Created table metadata for table {table_id}");
@@ -329,7 +364,7 @@ impl TableCreator {
pub fn new(
cluster_id: u64,
task: CreateTableTask,
region_routes: Vec<RegionRoute>,
table_route: TableRouteValue,
region_wal_options: HashMap<RegionNumber, String>,
) -> Self {
Self {
@@ -337,21 +372,23 @@ impl TableCreator {
state: CreateTableState::Prepare,
cluster_id,
task,
region_routes,
table_route,
region_wal_options,
},
opening_regions: vec![],
}
}
/// Register opening regions if doesn't exist.
pub fn register_opening_regions(&mut self, context: &DdlContext) -> Result<()> {
let region_routes = &self.data.region_routes;
/// Registers and returns the guards of the opening region if they don't exist.
fn register_opening_regions(
&self,
context: &DdlContext,
region_routes: &[RegionRoute],
) -> Result<Vec<OperatingRegionGuard>> {
let opening_regions = operating_leader_regions(region_routes);
if self.opening_regions.len() == opening_regions.len() {
return Ok(());
return Ok(vec![]);
}
let mut opening_region_guards = Vec::with_capacity(opening_regions.len());
@@ -366,9 +403,7 @@ impl TableCreator {
})?;
opening_region_guards.push(guard);
}
self.opening_regions = opening_region_guards;
Ok(())
Ok(opening_region_guards)
}
}
@@ -386,7 +421,7 @@ pub enum CreateTableState {
pub struct CreateTableData {
pub state: CreateTableState,
pub task: CreateTableTask,
pub region_routes: Vec<RegionRoute>,
table_route: TableRouteValue,
pub region_wal_options: HashMap<RegionNumber, String>,
pub cluster_id: u64,
}
@@ -399,28 +434,18 @@ impl CreateTableData {
/// Builder for [PbCreateRegionRequest].
pub struct CreateRequestBuilder {
context: DdlContext,
template: PbCreateRegionRequest,
/// Optional. Only for metric engine.
physical_table_id: Option<TableId>,
}
impl CreateRequestBuilder {
fn new_template(context: DdlContext, template: PbCreateRegionRequest) -> Self {
Self {
context,
template,
physical_table_id: None,
}
}
pub fn template(&self) -> &PbCreateRegionRequest {
&self.template
}
async fn build_one(
&mut self,
create_expr: &CreateTableExpr,
&self,
region_id: RegionId,
storage_path: String,
region_wal_options: &HashMap<RegionNumber, String>,
@@ -438,49 +463,18 @@ impl CreateRequestBuilder {
.insert(WAL_OPTIONS_KEY.to_string(), wal_options.clone())
});
if self.template.engine == METRIC_ENGINE {
self.metric_engine_hook(create_expr, region_id, &mut request)
.await?;
}
if let Some(physical_table_id) = self.physical_table_id {
// Logical table has the same region numbers with physical table, and they have a one-to-one mapping.
// For example, region 0 of logical table must resides with region 0 of physical table. So here we can
// simply concat the physical table id and the logical region number to get the physical region id.
let physical_region_id = RegionId::new(physical_table_id, region_id.region_number());
Ok(request)
}
async fn metric_engine_hook(
&mut self,
create_expr: &CreateTableExpr,
region_id: RegionId,
request: &mut PbCreateRegionRequest,
) -> Result<()> {
if let Some(physical_table_name) = request.options.get(LOGICAL_TABLE_METADATA_KEY) {
let table_id = if let Some(table_id) = self.physical_table_id {
table_id
} else {
let table_name_manager = self.context.table_metadata_manager.table_name_manager();
let table_name_key = TableNameKey::new(
&create_expr.catalog_name,
&create_expr.schema_name,
physical_table_name,
);
let table_id = table_name_manager
.get(table_name_key)
.await?
.context(TableInfoNotFoundSnafu {
table_name: physical_table_name,
})?
.table_id();
self.physical_table_id = Some(table_id);
table_id
};
// Concat physical table's table id and corresponding region number to get
// the physical region id.
let physical_region_id = RegionId::new(table_id, region_id.region_number());
request.options.insert(
LOGICAL_TABLE_METADATA_KEY.to_string(),
physical_region_id.as_u64().to_string(),
);
}
Ok(())
Ok(request)
}
}

View File

@@ -307,7 +307,7 @@ impl DropTableData {
}
fn region_routes(&self) -> &Vec<RegionRoute> {
&self.table_route_value.region_routes
self.table_route_value.region_routes()
}
fn table_info(&self) -> &RawTableInfo {

View File

@@ -177,7 +177,7 @@ impl DdlManager {
&self,
cluster_id: u64,
create_table_task: CreateTableTask,
region_routes: Vec<RegionRoute>,
table_route: TableRouteValue,
region_wal_options: HashMap<RegionNumber, String>,
) -> Result<ProcedureId> {
let context = self.create_context();
@@ -185,7 +185,7 @@ impl DdlManager {
let procedure = CreateTableProcedure::new(
cluster_id,
create_table_task,
region_routes,
table_route,
region_wal_options,
context,
);
@@ -275,11 +275,10 @@ async fn handle_truncate_table_task(
table_name: table_ref.to_string(),
})?;
let table_route_value = table_route_value.with_context(|| error::TableRouteNotFoundSnafu {
table_name: table_ref.to_string(),
})?;
let table_route_value =
table_route_value.context(error::TableRouteNotFoundSnafu { table_id })?;
let table_route = table_route_value.into_inner().region_routes;
let table_route = table_route_value.into_inner().region_routes().clone();
let id = ddl_manager
.submit_truncate_table_task(
@@ -356,9 +355,8 @@ async fn handle_drop_table_task(
table_name: table_ref.to_string(),
})?;
let table_route_value = table_route_value.with_context(|| error::TableRouteNotFoundSnafu {
table_name: table_ref.to_string(),
})?;
let table_route_value =
table_route_value.context(error::TableRouteNotFoundSnafu { table_id })?;
let id = ddl_manager
.submit_drop_table_task(
@@ -392,7 +390,7 @@ async fn handle_create_table_task(
let TableMetadata {
table_id,
region_routes,
table_route,
region_wal_options,
} = table_meta;
@@ -402,7 +400,7 @@ async fn handle_create_table_task(
.submit_create_table_task(
cluster_id,
create_table_task,
region_routes,
table_route,
region_wal_options,
)
.await?;

View File

@@ -135,9 +135,9 @@ pub enum Error {
source: table::error::Error,
},
#[snafu(display("Table route not found: {}", table_name))]
#[snafu(display("Failed to find table route for table id {}", table_id))]
TableRouteNotFound {
table_name: String,
table_id: TableId,
location: Location,
},

View File

@@ -14,6 +14,7 @@
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
use std::time::Duration;
use serde::{Deserialize, Serialize};
use store_api::storage::{RegionId, RegionNumber};
@@ -140,11 +141,12 @@ pub struct UpgradeRegion {
pub region_id: RegionId,
/// The `last_entry_id` of old leader region.
pub last_entry_id: Option<u64>,
/// The second of waiting for a wal replay.
/// The timeout of waiting for a wal replay.
///
/// `None` stands for no wait,
/// it's helpful to verify whether the leader region is ready.
pub wait_for_replay_secs: Option<u64>,
#[serde(with = "humantime_serde")]
pub wait_for_replay_timeout: Option<Duration>,
}
#[derive(Debug, Clone, Serialize, Deserialize, Display)]

View File

@@ -147,6 +147,14 @@ pub trait TableMetaKey {
fn as_raw_key(&self) -> Vec<u8>;
}
pub trait TableMetaValue {
fn try_from_raw_value(raw_value: &[u8]) -> Result<Self>
where
Self: Sized;
fn try_as_raw_value(&self) -> Result<Vec<u8>>;
}
pub type TableMetadataManagerRef = Arc<TableMetadataManager>;
pub struct TableMetadataManager {
@@ -221,7 +229,9 @@ impl<T: DeserializeOwned + Serialize> Serialize for DeserializedValueWithBytes<T
}
}
impl<'de, T: DeserializeOwned + Serialize> Deserialize<'de> for DeserializedValueWithBytes<T> {
impl<'de, T: DeserializeOwned + Serialize + TableMetaValue> Deserialize<'de>
for DeserializedValueWithBytes<T>
{
/// - Deserialize behaviors:
///
/// The `inner` field will be deserialized from the `bytes` field.
@@ -248,11 +258,11 @@ impl<T: Serialize + DeserializeOwned + Clone> Clone for DeserializedValueWithByt
}
}
impl<T: Serialize + DeserializeOwned> DeserializedValueWithBytes<T> {
impl<T: Serialize + DeserializeOwned + TableMetaValue> DeserializedValueWithBytes<T> {
/// Returns a struct containing a deserialized value and an original `bytes`.
/// It accepts original bytes of inner.
pub fn from_inner_bytes(bytes: Bytes) -> Result<Self> {
let inner = serde_json::from_slice(&bytes).context(error::SerdeJsonSnafu)?;
let inner = T::try_from_raw_value(&bytes)?;
Ok(Self { bytes, inner })
}
@@ -373,13 +383,10 @@ impl TableMetadataManager {
pub async fn create_table_metadata(
&self,
mut table_info: RawTableInfo,
region_routes: Vec<RegionRoute>,
table_route_value: TableRouteValue,
region_wal_options: HashMap<RegionNumber, String>,
) -> Result<()> {
let region_numbers = region_routes
.iter()
.map(|region| region.region.id.region_number())
.collect::<Vec<_>>();
let region_numbers = table_route_value.region_numbers();
table_info.meta.region_numbers = region_numbers;
let table_id = table_info.ident.table_id;
let engine = table_info.meta.engine.clone();
@@ -403,30 +410,28 @@ impl TableMetadataManager {
.table_info_manager()
.build_create_txn(table_id, &table_info_value)?;
// Creates datanode table key value pairs.
let distribution = region_distribution(&region_routes)?;
let create_datanode_table_txn = self.datanode_table_manager().build_create_txn(
table_id,
&engine,
&region_storage_path,
region_options,
region_wal_options,
distribution,
)?;
// Creates table route.
let table_route_value = TableRouteValue::new(region_routes);
let (create_table_route_txn, on_create_table_route_failure) = self
.table_route_manager()
.build_create_txn(table_id, &table_route_value)?;
let txn = Txn::merge_all(vec![
let mut txn = Txn::merge_all(vec![
create_table_name_txn,
create_table_info_txn,
create_datanode_table_txn,
create_table_route_txn,
]);
if let TableRouteValue::Physical(x) = &table_route_value {
let create_datanode_table_txn = self.datanode_table_manager().build_create_txn(
table_id,
&engine,
&region_storage_path,
region_options,
region_wal_options,
region_distribution(&x.region_routes)?,
)?;
txn = txn.merge(create_datanode_table_txn);
}
let r = self.kv_backend.txn(txn).await?;
// Checks whether metadata was already created.
@@ -478,7 +483,7 @@ impl TableMetadataManager {
.build_delete_txn(table_id, table_info_value)?;
// Deletes datanode table key value pairs.
let distribution = region_distribution(&table_route_value.region_routes)?;
let distribution = region_distribution(table_route_value.region_routes())?;
let delete_datanode_txn = self
.datanode_table_manager()
.build_delete_txn(table_id, distribution)?;
@@ -603,7 +608,7 @@ impl TableMetadataManager {
) -> Result<()> {
// Updates the datanode table key value pairs.
let current_region_distribution =
region_distribution(&current_table_route_value.region_routes)?;
region_distribution(current_table_route_value.region_routes())?;
let new_region_distribution = region_distribution(&new_region_routes)?;
let update_datanode_table_txn = self.datanode_table_manager().build_update_txn(
@@ -651,7 +656,7 @@ impl TableMetadataManager {
where
F: Fn(&RegionRoute) -> Option<Option<RegionStatus>>,
{
let mut new_region_routes = current_table_route_value.region_routes.clone();
let mut new_region_routes = current_table_route_value.region_routes().clone();
let mut updated = 0;
for route in &mut new_region_routes {
@@ -711,12 +716,12 @@ impl_table_meta_key!(TableNameKey<'_>, TableInfoKey, DatanodeTableKey);
macro_rules! impl_table_meta_value {
($($val_ty: ty), *) => {
$(
impl $val_ty {
pub fn try_from_raw_value(raw_value: &[u8]) -> Result<Self> {
impl $crate::key::TableMetaValue for $val_ty {
fn try_from_raw_value(raw_value: &[u8]) -> Result<Self> {
serde_json::from_slice(raw_value).context(SerdeJsonSnafu)
}
pub fn try_as_raw_value(&self) -> Result<Vec<u8>> {
fn try_as_raw_value(&self) -> Result<Vec<u8>> {
serde_json::to_vec(self).context(SerdeJsonSnafu)
}
}
@@ -744,8 +749,7 @@ macro_rules! impl_optional_meta_value {
impl_table_meta_value! {
TableNameValue,
TableInfoValue,
DatanodeTableValue,
TableRouteValue
DatanodeTableValue
}
impl_optional_meta_value! {
@@ -765,6 +769,7 @@ mod tests {
use super::datanode_table::DatanodeTableKey;
use super::test_utils;
use crate::ddl::utils::region_storage_path;
use crate::error::Result;
use crate::key::datanode_table::RegionInfo;
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
@@ -780,14 +785,14 @@ mod tests {
let region_routes = vec![region_route.clone()];
let expected_region_routes =
TableRouteValue::new(vec![region_route.clone(), region_route.clone()]);
TableRouteValue::physical(vec![region_route.clone(), region_route.clone()]);
let expected = serde_json::to_vec(&expected_region_routes).unwrap();
// Serialize behaviors:
// The inner field will be ignored.
let value = DeserializedValueWithBytes {
// ignored
inner: TableRouteValue::new(region_routes.clone()),
inner: TableRouteValue::physical(region_routes.clone()),
bytes: Bytes::from(expected.clone()),
};
@@ -831,43 +836,56 @@ mod tests {
test_utils::new_test_table_info(10, region_numbers)
}
async fn create_physical_table_metadata(
table_metadata_manager: &TableMetadataManager,
table_info: RawTableInfo,
region_routes: Vec<RegionRoute>,
) -> Result<()> {
table_metadata_manager
.create_table_metadata(
table_info,
TableRouteValue::physical(region_routes),
HashMap::default(),
)
.await
}
#[tokio::test]
async fn test_create_table_metadata() {
let mem_kv = Arc::new(MemoryKvBackend::default());
let table_metadata_manager = TableMetadataManager::new(mem_kv);
let region_route = new_test_region_route();
let region_routes = vec![region_route.clone()];
let region_routes = &vec![region_route.clone()];
let table_info: RawTableInfo =
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
// creates metadata.
table_metadata_manager
.create_table_metadata(
table_info.clone(),
region_routes.clone(),
HashMap::default(),
)
.await
.unwrap();
create_physical_table_metadata(
&table_metadata_manager,
table_info.clone(),
region_routes.clone(),
)
.await
.unwrap();
// if metadata was already created, it should be ok.
table_metadata_manager
.create_table_metadata(
table_info.clone(),
region_routes.clone(),
HashMap::default(),
)
.await
.unwrap();
assert!(create_physical_table_metadata(
&table_metadata_manager,
table_info.clone(),
region_routes.clone(),
)
.await
.is_ok());
let mut modified_region_routes = region_routes.clone();
modified_region_routes.push(region_route.clone());
// if remote metadata was exists, it should return an error.
assert!(table_metadata_manager
.create_table_metadata(
table_info.clone(),
modified_region_routes,
HashMap::default()
)
.await
.is_err());
assert!(create_physical_table_metadata(
&table_metadata_manager,
table_info.clone(),
modified_region_routes
)
.await
.is_err());
let (remote_table_info, remote_table_route) = table_metadata_manager
.get_full_table_info(10)
@@ -879,7 +897,7 @@ mod tests {
table_info
);
assert_eq!(
remote_table_route.unwrap().into_inner().region_routes,
remote_table_route.unwrap().into_inner().region_routes(),
region_routes
);
}
@@ -889,23 +907,23 @@ mod tests {
let mem_kv = Arc::new(MemoryKvBackend::default());
let table_metadata_manager = TableMetadataManager::new(mem_kv);
let region_route = new_test_region_route();
let region_routes = vec![region_route.clone()];
let region_routes = &vec![region_route.clone()];
let table_info: RawTableInfo =
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
let table_id = table_info.ident.table_id;
let datanode_id = 2;
let table_route_value =
DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes.clone()));
let table_route_value = DeserializedValueWithBytes::from_inner(TableRouteValue::physical(
region_routes.clone(),
));
// creates metadata.
table_metadata_manager
.create_table_metadata(
table_info.clone(),
region_routes.clone(),
HashMap::default(),
)
.await
.unwrap();
create_physical_table_metadata(
&table_metadata_manager,
table_info.clone(),
region_routes.clone(),
)
.await
.unwrap();
let table_info_value =
DeserializedValueWithBytes::from_inner(TableInfoValue::new(table_info.clone()));
@@ -960,7 +978,7 @@ mod tests {
.unwrap()
.unwrap()
.into_inner();
assert_eq!(removed_table_route.region_routes, region_routes);
assert_eq!(removed_table_route.region_routes(), region_routes);
}
#[tokio::test]
@@ -973,14 +991,14 @@ mod tests {
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
let table_id = table_info.ident.table_id;
// creates metadata.
table_metadata_manager
.create_table_metadata(
table_info.clone(),
region_routes.clone(),
HashMap::default(),
)
.await
.unwrap();
create_physical_table_metadata(
&table_metadata_manager,
table_info.clone(),
region_routes.clone(),
)
.await
.unwrap();
let new_table_name = "another_name".to_string();
let table_info_value =
DeserializedValueWithBytes::from_inner(TableInfoValue::new(table_info.clone()));
@@ -1045,14 +1063,14 @@ mod tests {
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
let table_id = table_info.ident.table_id;
// creates metadata.
table_metadata_manager
.create_table_metadata(
table_info.clone(),
region_routes.clone(),
HashMap::default(),
)
.await
.unwrap();
create_physical_table_metadata(
&table_metadata_manager,
table_info.clone(),
region_routes.clone(),
)
.await
.unwrap();
let mut new_table_info = table_info.clone();
new_table_info.name = "hi".to_string();
let current_table_info_value =
@@ -1123,17 +1141,18 @@ mod tests {
let table_info: RawTableInfo =
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
let table_id = table_info.ident.table_id;
let current_table_route_value =
DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes.clone()));
let current_table_route_value = DeserializedValueWithBytes::from_inner(
TableRouteValue::physical(region_routes.clone()),
);
// creates metadata.
table_metadata_manager
.create_table_metadata(
table_info.clone(),
region_routes.clone(),
HashMap::default(),
)
.await
.unwrap();
create_physical_table_metadata(
&table_metadata_manager,
table_info.clone(),
region_routes.clone(),
)
.await
.unwrap();
table_metadata_manager
.update_leader_region_status(table_id, &current_table_route_value, |region_route| {
@@ -1154,11 +1173,11 @@ mod tests {
.unwrap();
assert_eq!(
updated_route_value.region_routes[0].leader_status,
updated_route_value.region_routes()[0].leader_status,
Some(RegionStatus::Downgraded)
);
assert_eq!(
updated_route_value.region_routes[1].leader_status,
updated_route_value.region_routes()[1].leader_status,
Some(RegionStatus::Downgraded)
);
}
@@ -1193,17 +1212,19 @@ mod tests {
let engine = table_info.meta.engine.as_str();
let region_storage_path =
region_storage_path(&table_info.catalog_name, &table_info.schema_name);
let current_table_route_value =
DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes.clone()));
let current_table_route_value = DeserializedValueWithBytes::from_inner(
TableRouteValue::physical(region_routes.clone()),
);
// creates metadata.
table_metadata_manager
.create_table_metadata(
table_info.clone(),
region_routes.clone(),
HashMap::default(),
)
.await
.unwrap();
create_physical_table_metadata(
&table_metadata_manager,
table_info.clone(),
region_routes.clone(),
)
.await
.unwrap();
assert_datanode_table(&table_metadata_manager, table_id, &region_routes).await;
let new_region_routes = vec![
new_region_route(1, 1),

View File

@@ -24,7 +24,8 @@ use table::metadata::TableId;
use crate::error::{InvalidTableMetadataSnafu, Result};
use crate::key::{
RegionDistribution, TableMetaKey, DATANODE_TABLE_KEY_PATTERN, DATANODE_TABLE_KEY_PREFIX,
RegionDistribution, TableMetaKey, TableMetaValue, DATANODE_TABLE_KEY_PATTERN,
DATANODE_TABLE_KEY_PREFIX,
};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;

View File

@@ -18,7 +18,7 @@ use serde::{Deserialize, Serialize};
use table::engine::TableReference;
use table::metadata::{RawTableInfo, TableId};
use super::{DeserializedValueWithBytes, TABLE_INFO_KEY_PREFIX};
use super::{DeserializedValueWithBytes, TableMetaValue, TABLE_INFO_KEY_PREFIX};
use crate::error::Result;
use crate::key::{to_removed_key, TableMetaKey};
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};

View File

@@ -18,7 +18,7 @@ use serde::{Deserialize, Serialize};
use snafu::OptionExt;
use table::metadata::TableId;
use super::{TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
use super::{TableMetaValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
use crate::error::{Error, InvalidTableMetadataSnafu, Result};
use crate::key::{to_removed_key, TableMetaKey};
use crate::kv_backend::memory::MemoryKvBackend;

View File

@@ -71,8 +71,8 @@ impl_table_meta_value! {TableRegionValue}
#[cfg(test)]
mod tests {
use super::*;
use crate::key::TableMetaValue;
#[test]
fn test_serde() {

View File

@@ -16,11 +16,12 @@ use std::collections::HashMap;
use std::fmt::Display;
use serde::{Deserialize, Serialize};
use store_api::storage::RegionId;
use snafu::ResultExt;
use store_api::storage::{RegionId, RegionNumber};
use table::metadata::TableId;
use super::DeserializedValueWithBytes;
use crate::error::Result;
use super::{DeserializedValueWithBytes, TableMetaValue};
use crate::error::{Result, SerdeJsonSnafu};
use crate::key::{to_removed_key, RegionDistribution, TableMetaKey, TABLE_ROUTE_PREFIX};
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};
use crate::kv_backend::KvBackendRef;
@@ -38,41 +39,125 @@ impl TableRouteKey {
}
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct TableRouteValue {
#[serde(tag = "type", rename_all = "snake_case")]
pub enum TableRouteValue {
Physical(PhysicalTableRouteValue),
Logical(LogicalTableRouteValue),
}
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct PhysicalTableRouteValue {
pub region_routes: Vec<RegionRoute>,
version: u64,
}
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct LogicalTableRouteValue {
// TODO(LFC): Add table route for MetricsEngine table.
}
impl TableRouteValue {
pub fn physical(region_routes: Vec<RegionRoute>) -> Self {
Self::Physical(PhysicalTableRouteValue::new(region_routes))
}
/// Returns a new version [TableRouteValue] with `region_routes`.
pub fn update(&self, region_routes: Vec<RegionRoute>) -> Self {
let version = self.physical_table_route().version;
Self::Physical(PhysicalTableRouteValue {
region_routes,
version: version + 1,
})
}
/// Returns the version.
///
/// For test purpose.
#[cfg(any(test, feature = "testing"))]
pub fn version(&self) -> u64 {
self.physical_table_route().version
}
/// Returns the corresponding [RegionRoute].
pub fn region_route(&self, region_id: RegionId) -> Option<RegionRoute> {
self.physical_table_route()
.region_routes
.iter()
.find(|route| route.region.id == region_id)
.cloned()
}
/// Returns true if it's [TableRouteValue::Physical].
pub fn is_physical(&self) -> bool {
matches!(self, TableRouteValue::Physical(_))
}
/// Gets the [RegionRoute]s of this [TableRouteValue::Physical].
///
/// # Panics
/// The route type is not the [TableRouteValue::Physical].
pub fn region_routes(&self) -> &Vec<RegionRoute> {
&self.physical_table_route().region_routes
}
fn physical_table_route(&self) -> &PhysicalTableRouteValue {
match self {
TableRouteValue::Physical(x) => x,
_ => unreachable!("Mistakenly been treated as a Physical TableRoute: {self:?}"),
}
}
pub fn region_numbers(&self) -> Vec<RegionNumber> {
match self {
TableRouteValue::Physical(x) => x
.region_routes
.iter()
.map(|region_route| region_route.region.id.region_number())
.collect::<Vec<_>>(),
TableRouteValue::Logical(x) => x
.region_ids()
.iter()
.map(|region_id| region_id.region_number())
.collect::<Vec<_>>(),
}
}
}
impl TableMetaValue for TableRouteValue {
fn try_from_raw_value(raw_value: &[u8]) -> Result<Self> {
let r = serde_json::from_slice::<TableRouteValue>(raw_value);
match r {
// Compatible with old TableRouteValue.
Err(e) if e.is_data() => Ok(Self::Physical(
serde_json::from_slice::<PhysicalTableRouteValue>(raw_value)
.context(SerdeJsonSnafu)?,
)),
Ok(x) => Ok(x),
Err(e) => Err(e).context(SerdeJsonSnafu),
}
}
fn try_as_raw_value(&self) -> Result<Vec<u8>> {
serde_json::to_vec(self).context(SerdeJsonSnafu)
}
}
impl PhysicalTableRouteValue {
pub fn new(region_routes: Vec<RegionRoute>) -> Self {
Self {
region_routes,
version: 0,
}
}
}
/// Returns a new version [TableRouteValue] with `region_routes`.
pub fn update(&self, region_routes: Vec<RegionRoute>) -> Self {
Self {
region_routes,
version: self.version + 1,
}
impl LogicalTableRouteValue {
pub fn physical_table_id(&self) -> TableId {
todo!()
}
/// Returns the version.
///
/// For test purpose.
#[cfg(any(tets, feature = "testing"))]
pub fn version(&self) -> u64 {
self.version
}
/// Returns the corresponding [RegionRoute].
pub fn region_route(&self, region_id: RegionId) -> Option<RegionRoute> {
self.region_routes
.iter()
.find(|route| route.region.id == region_id)
.cloned()
pub fn region_ids(&self) -> Vec<RegionId> {
todo!()
}
}
@@ -269,7 +354,24 @@ impl TableRouteManager {
) -> Result<Option<RegionDistribution>> {
self.get(table_id)
.await?
.map(|table_route| region_distribution(&table_route.into_inner().region_routes))
.map(|table_route| region_distribution(table_route.region_routes()))
.transpose()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_table_route_compatibility() {
let old_raw_v = r#"{"region_routes":[{"region":{"id":1,"name":"r1","partition":null,"attrs":{}},"leader_peer":{"id":2,"addr":"a2"},"follower_peers":[]},{"region":{"id":1,"name":"r1","partition":null,"attrs":{}},"leader_peer":{"id":2,"addr":"a2"},"follower_peers":[]}],"version":0}"#;
let v = TableRouteValue::try_from_raw_value(old_raw_v.as_bytes()).unwrap();
let new_raw_v = format!("{:?}", v);
assert_eq!(
new_raw_v,
r#"Physical(PhysicalTableRouteValue { region_routes: [RegionRoute { region: Region { id: 1(0, 1), name: "r1", partition: None, attrs: {} }, leader_peer: Some(Peer { id: 2, addr: "a2" }), follower_peers: [], leader_status: None }, RegionRoute { region: Region { id: 1(0, 1), name: "r1", partition: None, attrs: {} }, leader_peer: Some(Peer { id: 2, addr: "a2" }), follower_peers: [], leader_status: None }], version: 0 })"#
);
}
}

View File

@@ -10,6 +10,7 @@ chrono-tz = "0.8"
chrono.workspace = true
common-error.workspace = true
common-macro.workspace = true
once_cell.workspace = true
serde = { version = "1.0", features = ["derive"] }
serde_json.workspace = true
snafu.workspace = true

View File

@@ -20,7 +20,7 @@ use chrono::{Days, LocalResult, Months, NaiveDateTime, TimeZone as ChronoTimeZon
use serde::{Deserialize, Serialize};
use crate::error::{Error, InvalidDateStrSnafu, Result};
use crate::timezone::TimeZone;
use crate::timezone::Timezone;
use crate::util::{format_utc_datetime, local_datetime_to_utc};
use crate::{Date, Interval};
@@ -110,11 +110,11 @@ impl DateTime {
NaiveDateTime::from_timestamp_millis(self.0)
}
pub fn to_chrono_datetime_with_timezone(&self, tz: Option<TimeZone>) -> Option<NaiveDateTime> {
pub fn to_chrono_datetime_with_timezone(&self, tz: Option<Timezone>) -> Option<NaiveDateTime> {
let datetime = self.to_chrono_datetime();
datetime.map(|v| match tz {
Some(TimeZone::Offset(offset)) => offset.from_utc_datetime(&v).naive_local(),
Some(TimeZone::Named(tz)) => tz.from_utc_datetime(&v).naive_local(),
Some(Timezone::Offset(offset)) => offset.from_utc_datetime(&v).naive_local(),
Some(Timezone::Named(tz)) => tz.from_utc_datetime(&v).naive_local(),
None => Utc.from_utc_datetime(&v).naive_local(),
})
}
@@ -155,10 +155,11 @@ impl DateTime {
#[cfg(test)]
mod tests {
use super::*;
use crate::timezone::set_default_timezone;
#[test]
pub fn test_new_date_time() {
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("Asia/Shanghai")).unwrap();
assert_eq!("1970-01-01 08:00:00+0800", DateTime::new(0).to_string());
assert_eq!("1970-01-01 08:00:01+0800", DateTime::new(1000).to_string());
assert_eq!("1970-01-01 07:59:59+0800", DateTime::new(-1000).to_string());
@@ -166,7 +167,7 @@ mod tests {
#[test]
pub fn test_parse_from_string() {
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("Asia/Shanghai")).unwrap();
let time = "1970-01-01 00:00:00+0800";
let dt = DateTime::from_str(time).unwrap();
assert_eq!(time, &dt.to_string());
@@ -194,7 +195,7 @@ mod tests {
#[test]
fn test_parse_local_date_time() {
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("Asia/Shanghai")).unwrap();
assert_eq!(
-28800000,
DateTime::from_str("1970-01-01 00:00:00").unwrap().val()

View File

@@ -51,8 +51,8 @@ pub enum Error {
#[snafu(display("Timestamp arithmetic overflow, msg: {}", msg))]
ArithmeticOverflow { msg: String, location: Location },
#[snafu(display("Invalid time zone offset: {hours}:{minutes}"))]
InvalidTimeZoneOffset {
#[snafu(display("Invalid timezone offset: {hours}:{minutes}"))]
InvalidTimezoneOffset {
hours: i32,
minutes: u32,
location: Location,
@@ -66,8 +66,8 @@ pub enum Error {
location: Location,
},
#[snafu(display("Invalid time zone string {raw}"))]
ParseTimeZoneName { raw: String, location: Location },
#[snafu(display("Invalid timezone string {raw}"))]
ParseTimezoneName { raw: String, location: Location },
}
impl ErrorExt for Error {
@@ -75,9 +75,9 @@ impl ErrorExt for Error {
match self {
Error::ParseDateStr { .. }
| Error::ParseTimestamp { .. }
| Error::InvalidTimeZoneOffset { .. }
| Error::InvalidTimezoneOffset { .. }
| Error::ParseOffsetStr { .. }
| Error::ParseTimeZoneName { .. } => StatusCode::InvalidArguments,
| Error::ParseTimezoneName { .. } => StatusCode::InvalidArguments,
Error::TimestampOverflow { .. } => StatusCode::Internal,
Error::InvalidDateStr { .. } | Error::ArithmeticOverflow { .. } => {
StatusCode::InvalidArguments
@@ -96,9 +96,9 @@ impl ErrorExt for Error {
| Error::TimestampOverflow { location, .. }
| Error::ArithmeticOverflow { location, .. } => Some(*location),
Error::ParseDateStr { .. }
| Error::InvalidTimeZoneOffset { .. }
| Error::InvalidTimezoneOffset { .. }
| Error::ParseOffsetStr { .. }
| Error::ParseTimeZoneName { .. } => None,
| Error::ParseTimezoneName { .. } => None,
Error::InvalidDateStr { location, .. } => Some(*location),
Error::ParseInterval { location, .. } => Some(*location),
}

View File

@@ -31,4 +31,4 @@ pub use interval::Interval;
pub use range::RangeMillis;
pub use timestamp::Timestamp;
pub use timestamp_millis::TimestampMillis;
pub use timezone::TimeZone;
pub use timezone::Timezone;

View File

@@ -19,8 +19,7 @@ use chrono::{NaiveDateTime, NaiveTime, TimeZone as ChronoTimeZone, Utc};
use serde::{Deserialize, Serialize};
use crate::timestamp::TimeUnit;
use crate::timezone::TimeZone;
use crate::util::format_utc_datetime;
use crate::timezone::{get_timezone, Timezone};
/// Time value, represents the elapsed time since midnight in the unit of `TimeUnit`.
#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
@@ -109,30 +108,28 @@ impl Time {
self.as_formatted_string("%H:%M:%S%.f%z", None)
}
/// Format Time for local timezone.
pub fn to_local_string(&self) -> String {
/// Format Time for system timezone.
pub fn to_system_tz_string(&self) -> String {
self.as_formatted_string("%H:%M:%S%.f", None)
}
/// Format Time for given timezone.
/// When timezone is None, using local time by default.
pub fn to_timezone_aware_string(&self, tz: Option<TimeZone>) -> String {
/// When timezone is None, using system timezone by default.
pub fn to_timezone_aware_string(&self, tz: Option<Timezone>) -> String {
self.as_formatted_string("%H:%M:%S%.f", tz)
}
fn as_formatted_string(self, pattern: &str, timezone: Option<TimeZone>) -> String {
fn as_formatted_string(self, pattern: &str, timezone: Option<Timezone>) -> String {
if let Some(time) = self.to_chrono_time() {
let date = Utc::now().date_naive();
let datetime = NaiveDateTime::new(date, time);
match timezone {
Some(TimeZone::Offset(offset)) => {
match get_timezone(timezone) {
Timezone::Offset(offset) => {
format!("{}", offset.from_utc_datetime(&datetime).format(pattern))
}
Some(TimeZone::Named(tz)) => {
Timezone::Named(tz) => {
format!("{}", tz.from_utc_datetime(&datetime).format(pattern))
}
None => format_utc_datetime(&datetime, pattern),
}
} else {
format!("[Time{}: {}]", self.unit, self.value)
@@ -223,6 +220,7 @@ mod tests {
use serde_json::Value;
use super::*;
use crate::timezone::set_default_timezone;
#[test]
fn test_time() {
@@ -312,33 +310,33 @@ mod tests {
#[test]
fn test_to_iso8601_string() {
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("+10:00")).unwrap();
let time_millis = 1000001;
let ts = Time::new_millisecond(time_millis);
assert_eq!("08:16:40.001+0800", ts.to_iso8601_string());
assert_eq!("10:16:40.001+1000", ts.to_iso8601_string());
let time_millis = 1000;
let ts = Time::new_millisecond(time_millis);
assert_eq!("08:00:01+0800", ts.to_iso8601_string());
assert_eq!("10:00:01+1000", ts.to_iso8601_string());
let time_millis = 1;
let ts = Time::new_millisecond(time_millis);
assert_eq!("08:00:00.001+0800", ts.to_iso8601_string());
assert_eq!("10:00:00.001+1000", ts.to_iso8601_string());
let time_seconds = 9 * 3600;
let ts = Time::new_second(time_seconds);
assert_eq!("17:00:00+0800", ts.to_iso8601_string());
assert_eq!("19:00:00+1000", ts.to_iso8601_string());
let time_seconds = 23 * 3600;
let ts = Time::new_second(time_seconds);
assert_eq!("07:00:00+0800", ts.to_iso8601_string());
assert_eq!("09:00:00+1000", ts.to_iso8601_string());
}
#[test]
fn test_serialize_to_json_value() {
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("+10:00")).unwrap();
assert_eq!(
"08:00:01+0800",
"10:00:01+1000",
match serde_json::Value::from(Time::new(1, TimeUnit::Second)) {
Value::String(s) => s,
_ => unreachable!(),
@@ -346,7 +344,7 @@ mod tests {
);
assert_eq!(
"08:00:00.001+0800",
"10:00:00.001+1000",
match serde_json::Value::from(Time::new(1, TimeUnit::Millisecond)) {
Value::String(s) => s,
_ => unreachable!(),
@@ -354,7 +352,7 @@ mod tests {
);
assert_eq!(
"08:00:00.000001+0800",
"10:00:00.000001+1000",
match serde_json::Value::from(Time::new(1, TimeUnit::Microsecond)) {
Value::String(s) => s,
_ => unreachable!(),
@@ -362,7 +360,7 @@ mod tests {
);
assert_eq!(
"08:00:00.000000001+0800",
"10:00:00.000000001+1000",
match serde_json::Value::from(Time::new(1, TimeUnit::Nanosecond)) {
Value::String(s) => s,
_ => unreachable!(),
@@ -372,46 +370,47 @@ mod tests {
#[test]
fn test_to_timezone_aware_string() {
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("+10:00")).unwrap();
assert_eq!(
"08:00:00.001",
"10:00:00.001",
Time::new(1, TimeUnit::Millisecond).to_timezone_aware_string(None)
);
std::env::set_var("TZ", "Asia/Shanghai");
assert_eq!(
"08:00:00.001",
Time::new(1, TimeUnit::Millisecond)
.to_timezone_aware_string(TimeZone::from_tz_string("SYSTEM").unwrap())
.to_timezone_aware_string(Some(Timezone::from_tz_string("SYSTEM").unwrap()))
);
assert_eq!(
"08:00:00.001",
Time::new(1, TimeUnit::Millisecond)
.to_timezone_aware_string(TimeZone::from_tz_string("+08:00").unwrap())
.to_timezone_aware_string(Some(Timezone::from_tz_string("+08:00").unwrap()))
);
assert_eq!(
"07:00:00.001",
Time::new(1, TimeUnit::Millisecond)
.to_timezone_aware_string(TimeZone::from_tz_string("+07:00").unwrap())
.to_timezone_aware_string(Some(Timezone::from_tz_string("+07:00").unwrap()))
);
assert_eq!(
"23:00:00.001",
Time::new(1, TimeUnit::Millisecond)
.to_timezone_aware_string(TimeZone::from_tz_string("-01:00").unwrap())
.to_timezone_aware_string(Some(Timezone::from_tz_string("-01:00").unwrap()))
);
assert_eq!(
"08:00:00.001",
Time::new(1, TimeUnit::Millisecond)
.to_timezone_aware_string(TimeZone::from_tz_string("Asia/Shanghai").unwrap())
.to_timezone_aware_string(Some(Timezone::from_tz_string("Asia/Shanghai").unwrap()))
);
assert_eq!(
"00:00:00.001",
Time::new(1, TimeUnit::Millisecond)
.to_timezone_aware_string(TimeZone::from_tz_string("UTC").unwrap())
.to_timezone_aware_string(Some(Timezone::from_tz_string("UTC").unwrap()))
);
assert_eq!(
"03:00:00.001",
Time::new(1, TimeUnit::Millisecond)
.to_timezone_aware_string(TimeZone::from_tz_string("Europe/Moscow").unwrap())
.to_timezone_aware_string(Some(Timezone::from_tz_string("Europe/Moscow").unwrap()))
);
}
}

View File

@@ -27,12 +27,12 @@ use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use crate::error::{ArithmeticOverflowSnafu, Error, ParseTimestampSnafu, TimestampOverflowSnafu};
use crate::timezone::TimeZone;
use crate::util::{div_ceil, format_utc_datetime};
use crate::timezone::{get_timezone, Timezone};
use crate::util::div_ceil;
use crate::{error, Interval};
/// Timestamp represents the value of units(seconds/milliseconds/microseconds/nanoseconds) elapsed
/// since UNIX epoch. The valid value range of [Timestamp] depends on its unit (all in UTC time zone):
/// since UNIX epoch. The valid value range of [Timestamp] depends on its unit (all in UTC timezone):
/// - for [TimeUnit::Second]: [-262144-01-01 00:00:00, +262143-12-31 23:59:59]
/// - for [TimeUnit::Millisecond]: [-262144-01-01 00:00:00.000, +262143-12-31 23:59:59.999]
/// - for [TimeUnit::Microsecond]: [-262144-01-01 00:00:00.000000, +262143-12-31 23:59:59.999999]
@@ -293,26 +293,26 @@ impl Timestamp {
self.as_formatted_string("%Y-%m-%d %H:%M:%S%.f%z", None)
}
/// Format timestamp using the **system timezone**.
pub fn to_local_string(&self) -> String {
self.as_formatted_string("%Y-%m-%d %H:%M:%S%.f", None)
}
/// Format timestamp for given timezone.
/// When timezone is None, using local time by default.
pub fn to_timezone_aware_string(&self, tz: Option<TimeZone>) -> String {
/// If `tz == None`, the server's default timezone will be used.
pub fn to_timezone_aware_string(&self, tz: Option<Timezone>) -> String {
self.as_formatted_string("%Y-%m-%d %H:%M:%S%.f", tz)
}
fn as_formatted_string(self, pattern: &str, timezone: Option<TimeZone>) -> String {
fn as_formatted_string(self, pattern: &str, timezone: Option<Timezone>) -> String {
if let Some(v) = self.to_chrono_datetime() {
match timezone {
Some(TimeZone::Offset(offset)) => {
match get_timezone(timezone) {
Timezone::Offset(offset) => {
format!("{}", offset.from_utc_datetime(&v).format(pattern))
}
Some(TimeZone::Named(tz)) => {
Timezone::Named(tz) => {
format!("{}", tz.from_utc_datetime(&v).format(pattern))
}
None => format_utc_datetime(&v, pattern),
}
} else {
format!("[Timestamp{}: {}]", self.unit, self.value)
@@ -324,11 +324,11 @@ impl Timestamp {
NaiveDateTime::from_timestamp_opt(sec, nsec)
}
pub fn to_chrono_datetime_with_timezone(&self, tz: Option<TimeZone>) -> Option<NaiveDateTime> {
pub fn to_chrono_datetime_with_timezone(&self, tz: Option<Timezone>) -> Option<NaiveDateTime> {
let datetime = self.to_chrono_datetime();
datetime.map(|v| match tz {
Some(TimeZone::Offset(offset)) => offset.from_utc_datetime(&v).naive_local(),
Some(TimeZone::Named(tz)) => tz.from_utc_datetime(&v).naive_local(),
Some(Timezone::Offset(offset)) => offset.from_utc_datetime(&v).naive_local(),
Some(Timezone::Named(tz)) => tz.from_utc_datetime(&v).naive_local(),
None => Utc.from_utc_datetime(&v).naive_local(),
})
}
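As an illustrative sketch (not part of this diff; it assumes `Timestamp`, `TimeUnit`, and `Timezone` are reachable from the `common_time` crate the way other hunks use them), the new `Option<Timezone>` parameter would be exercised like this:
use common_time::timestamp::TimeUnit;
use common_time::timezone::Timezone;
use common_time::Timestamp;
fn timestamp_in_zone_example() {
    // 2020-09-08 13:42:29 UTC as a second-precision timestamp.
    let ts = Timestamp::new(1599572549, TimeUnit::Second);
    // With an explicit zone the naive result is the wall-clock time in that zone.
    let shanghai = ts
        .to_chrono_datetime_with_timezone(Some(Timezone::from_tz_string("Asia/Shanghai").unwrap()))
        .unwrap();
    assert_eq!("2020-09-08 21:42:29", shanghai.format("%Y-%m-%d %H:%M:%S").to_string());
    // With `None` the value is interpreted as plain UTC.
    let utc = ts.to_chrono_datetime_with_timezone(None).unwrap();
    assert_eq!("2020-09-08 13:42:29", utc.format("%Y-%m-%d %H:%M:%S").to_string());
}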
@@ -560,6 +560,7 @@ mod tests {
use serde_json::Value;
use super::*;
use crate::timezone::set_default_timezone;
#[test]
pub fn test_time_unit() {
@@ -789,7 +790,7 @@ mod tests {
#[test]
fn test_to_iso8601_string() {
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("Asia/Shanghai")).unwrap();
let datetime_str = "2020-09-08 13:42:29.042+0000";
let ts = Timestamp::from_str(datetime_str).unwrap();
assert_eq!("2020-09-08 21:42:29.042+0800", ts.to_iso8601_string());
@@ -813,7 +814,7 @@ mod tests {
#[test]
fn test_serialize_to_json_value() {
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("Asia/Shanghai")).unwrap();
assert_eq!(
"1970-01-01 08:00:01+0800",
match serde_json::Value::from(Timestamp::new(1, TimeUnit::Second)) {
@@ -1054,7 +1055,7 @@ mod tests {
// $TZ doesn't take effect.
#[test]
fn test_parse_in_time_zone() {
fn test_parse_in_timezone() {
std::env::set_var("TZ", "Asia/Shanghai");
assert_eq!(
Timestamp::new(28800, TimeUnit::Second),
@@ -1074,7 +1075,7 @@ mod tests {
#[test]
fn test_to_local_string() {
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("Asia/Shanghai")).unwrap();
assert_eq!(
"1970-01-01 08:00:00.000000001",
@@ -1107,51 +1108,52 @@ mod tests {
#[test]
fn test_to_timezone_aware_string() {
set_default_timezone(Some("Asia/Shanghai")).unwrap();
std::env::set_var("TZ", "Asia/Shanghai");
assert_eq!(
"1970-01-01 08:00:00.001",
Timestamp::new(1, TimeUnit::Millisecond).to_timezone_aware_string(None)
Timestamp::new(1, TimeUnit::Millisecond)
.to_timezone_aware_string(Some(Timezone::from_tz_string("SYSTEM").unwrap()))
);
assert_eq!(
"1970-01-01 08:00:00.001",
Timestamp::new(1, TimeUnit::Millisecond)
.to_timezone_aware_string(TimeZone::from_tz_string("SYSTEM").unwrap())
.to_timezone_aware_string(Some(Timezone::from_tz_string("SYSTEM").unwrap()))
);
assert_eq!(
"1970-01-01 08:00:00.001",
Timestamp::new(1, TimeUnit::Millisecond)
.to_timezone_aware_string(TimeZone::from_tz_string("+08:00").unwrap())
.to_timezone_aware_string(Some(Timezone::from_tz_string("+08:00").unwrap()))
);
assert_eq!(
"1970-01-01 07:00:00.001",
Timestamp::new(1, TimeUnit::Millisecond)
.to_timezone_aware_string(TimeZone::from_tz_string("+07:00").unwrap())
.to_timezone_aware_string(Some(Timezone::from_tz_string("+07:00").unwrap()))
);
assert_eq!(
"1969-12-31 23:00:00.001",
Timestamp::new(1, TimeUnit::Millisecond)
.to_timezone_aware_string(TimeZone::from_tz_string("-01:00").unwrap())
.to_timezone_aware_string(Some(Timezone::from_tz_string("-01:00").unwrap()))
);
assert_eq!(
"1970-01-01 08:00:00.001",
Timestamp::new(1, TimeUnit::Millisecond)
.to_timezone_aware_string(TimeZone::from_tz_string("Asia/Shanghai").unwrap())
.to_timezone_aware_string(Some(Timezone::from_tz_string("Asia/Shanghai").unwrap()))
);
assert_eq!(
"1970-01-01 00:00:00.001",
Timestamp::new(1, TimeUnit::Millisecond)
.to_timezone_aware_string(TimeZone::from_tz_string("UTC").unwrap())
.to_timezone_aware_string(Some(Timezone::from_tz_string("UTC").unwrap()))
);
assert_eq!(
"1970-01-01 01:00:00.001",
Timestamp::new(1, TimeUnit::Millisecond)
.to_timezone_aware_string(TimeZone::from_tz_string("Europe/Berlin").unwrap())
.to_timezone_aware_string(Some(Timezone::from_tz_string("Europe/Berlin").unwrap()))
);
assert_eq!(
"1970-01-01 03:00:00.001",
Timestamp::new(1, TimeUnit::Millisecond)
.to_timezone_aware_string(TimeZone::from_tz_string("Europe/Moscow").unwrap())
.to_timezone_aware_string(Some(Timezone::from_tz_string("Europe/Moscow").unwrap()))
);
}

View File

@@ -15,24 +15,52 @@
use std::fmt::Display;
use std::str::FromStr;
use chrono::{FixedOffset, Local, Offset};
use chrono::FixedOffset;
use chrono_tz::Tz;
use once_cell::sync::OnceCell;
use snafu::{OptionExt, ResultExt};
use crate::error::{
InvalidTimeZoneOffsetSnafu, ParseOffsetStrSnafu, ParseTimeZoneNameSnafu, Result,
InvalidTimezoneOffsetSnafu, ParseOffsetStrSnafu, ParseTimezoneNameSnafu, Result,
};
use crate::util::find_tz_from_env;
/// System timezone used in `frontend`/`standalone`,
/// configured by the `default_timezone` option in the toml config;
/// defaults to `UTC` when `default_timezone` is not set.
static DEFAULT_TIMEZONE: OnceCell<Timezone> = OnceCell::new();
// Set the system timezone from `tz_str`
pub fn set_default_timezone(tz_str: Option<&str>) -> Result<()> {
let tz = match tz_str {
None | Some("") => Timezone::Named(Tz::UTC),
Some(tz) => Timezone::from_tz_string(tz)?,
};
DEFAULT_TIMEZONE.get_or_init(|| tz);
Ok(())
}
#[inline(always)]
/// If `tz` is `Some(timezone)`, return `timezone` directly;
/// otherwise return the current system timezone.
pub fn get_timezone(tz: Option<Timezone>) -> Timezone {
tz.unwrap_or_else(|| {
DEFAULT_TIMEZONE
.get()
.cloned()
.unwrap_or(Timezone::Named(Tz::UTC))
})
}
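A rough usage sketch of the new default-timezone plumbing (not part of this diff; it assumes the items are re-exported from the `common_time` crate, including the `error::Result` alias used elsewhere in these hunks):
use common_time::timezone::{get_timezone, set_default_timezone, Timezone};
fn default_timezone_example() -> common_time::error::Result<()> {
    // Configure the process-wide default once, e.g. from `default_timezone` in the toml config.
    set_default_timezone(Some("Asia/Shanghai"))?;
    // Callers that pass `None` fall back to the configured default.
    assert_eq!("Asia/Shanghai", get_timezone(None).to_string());
    // An explicit timezone always takes precedence over the default.
    let tz = get_timezone(Some(Timezone::from_tz_string("+08:00")?));
    assert_eq!("+08:00", tz.to_string());
    Ok(())
}
Because `DEFAULT_TIMEZONE` is a `OnceCell`, only the first call to `set_default_timezone` takes effect; later calls are silently ignored.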
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum TimeZone {
pub enum Timezone {
Offset(FixedOffset),
Named(Tz),
}
impl TimeZone {
impl Timezone {
/// Compute timezone from given offset hours and minutes
/// Return `None` if given offset exceeds scope
/// Return `Err` if the given offset is out of range
pub fn hours_mins_opt(offset_hours: i32, offset_mins: u32) -> Result<Self> {
let offset_secs = if offset_hours > 0 {
offset_hours * 3600 + offset_mins as i32 * 60
@@ -42,7 +70,7 @@ impl TimeZone {
FixedOffset::east_opt(offset_secs)
.map(Self::Offset)
.context(InvalidTimeZoneOffsetSnafu {
.context(InvalidTimezoneOffsetSnafu {
hours: offset_hours,
minutes: offset_mins,
})
@@ -57,10 +85,10 @@ impl TimeZone {
/// - `SYSTEM`
/// - Offset to UTC: `+08:00` , `-11:30`
/// - Named zones: `Asia/Shanghai`, `Europe/Berlin`
pub fn from_tz_string(tz_string: &str) -> Result<Option<Self>> {
pub fn from_tz_string(tz_string: &str) -> Result<Self> {
// Use system timezone
if tz_string.eq_ignore_ascii_case("SYSTEM") {
Ok(None)
Ok(Timezone::Named(find_tz_from_env().unwrap_or(Tz::UTC)))
} else if let Some((hrs, mins)) = tz_string.split_once(':') {
let hrs = hrs
.parse::<i32>()
@@ -68,16 +96,16 @@ impl TimeZone {
let mins = mins
.parse::<u32>()
.context(ParseOffsetStrSnafu { raw: tz_string })?;
Self::hours_mins_opt(hrs, mins).map(Some)
Self::hours_mins_opt(hrs, mins)
} else if let Ok(tz) = Tz::from_str(tz_string) {
Ok(Some(Self::Named(tz)))
Ok(Self::Named(tz))
} else {
ParseTimeZoneNameSnafu { raw: tz_string }.fail()
ParseTimezoneNameSnafu { raw: tz_string }.fail()
}
}
}
impl Display for TimeZone {
impl Display for Timezone {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Named(tz) => write!(f, "{}", tz.name()),
@@ -87,12 +115,9 @@ impl Display for TimeZone {
}
#[inline]
pub fn system_time_zone_name() -> String {
if let Some(tz) = find_tz_from_env() {
Local::now().with_timezone(&tz).offset().fix().to_string()
} else {
Local::now().offset().to_string()
}
/// Return the currently configured system timezone; defaults to UTC
pub fn system_timezone_name() -> String {
format!("{}", get_timezone(None))
}
#[cfg(test)]
@@ -101,61 +126,56 @@ mod tests {
#[test]
fn test_from_tz_string() {
assert_eq!(None, TimeZone::from_tz_string("SYSTEM").unwrap());
let utc_plus_8 = Some(TimeZone::Offset(FixedOffset::east_opt(3600 * 8).unwrap()));
assert_eq!(utc_plus_8, TimeZone::from_tz_string("+8:00").unwrap());
assert_eq!(utc_plus_8, TimeZone::from_tz_string("+08:00").unwrap());
assert_eq!(utc_plus_8, TimeZone::from_tz_string("08:00").unwrap());
let utc_minus_8 = Some(TimeZone::Offset(FixedOffset::west_opt(3600 * 8).unwrap()));
assert_eq!(utc_minus_8, TimeZone::from_tz_string("-08:00").unwrap());
assert_eq!(utc_minus_8, TimeZone::from_tz_string("-8:00").unwrap());
let utc_minus_8_5 = Some(TimeZone::Offset(
FixedOffset::west_opt(3600 * 8 + 60 * 30).unwrap(),
));
assert_eq!(utc_minus_8_5, TimeZone::from_tz_string("-8:30").unwrap());
let utc_plus_max = Some(TimeZone::Offset(FixedOffset::east_opt(3600 * 14).unwrap()));
assert_eq!(utc_plus_max, TimeZone::from_tz_string("14:00").unwrap());
let utc_minus_max = Some(TimeZone::Offset(
FixedOffset::west_opt(3600 * 13 + 60 * 59).unwrap(),
));
assert_eq!(utc_minus_max, TimeZone::from_tz_string("-13:59").unwrap());
assert_eq!(
Some(TimeZone::Named(Tz::Asia__Shanghai)),
TimeZone::from_tz_string("Asia/Shanghai").unwrap()
);
assert_eq!(
Some(TimeZone::Named(Tz::UTC)),
TimeZone::from_tz_string("UTC").unwrap()
Timezone::Named(Tz::UTC),
Timezone::from_tz_string("SYSTEM").unwrap()
);
assert!(TimeZone::from_tz_string("WORLD_PEACE").is_err());
assert!(TimeZone::from_tz_string("A0:01").is_err());
assert!(TimeZone::from_tz_string("20:0A").is_err());
assert!(TimeZone::from_tz_string(":::::").is_err());
assert!(TimeZone::from_tz_string("Asia/London").is_err());
assert!(TimeZone::from_tz_string("Unknown").is_err());
let utc_plus_8 = Timezone::Offset(FixedOffset::east_opt(3600 * 8).unwrap());
assert_eq!(utc_plus_8, Timezone::from_tz_string("+8:00").unwrap());
assert_eq!(utc_plus_8, Timezone::from_tz_string("+08:00").unwrap());
assert_eq!(utc_plus_8, Timezone::from_tz_string("08:00").unwrap());
let utc_minus_8 = Timezone::Offset(FixedOffset::west_opt(3600 * 8).unwrap());
assert_eq!(utc_minus_8, Timezone::from_tz_string("-08:00").unwrap());
assert_eq!(utc_minus_8, Timezone::from_tz_string("-8:00").unwrap());
let utc_minus_8_5 = Timezone::Offset(FixedOffset::west_opt(3600 * 8 + 60 * 30).unwrap());
assert_eq!(utc_minus_8_5, Timezone::from_tz_string("-8:30").unwrap());
let utc_plus_max = Timezone::Offset(FixedOffset::east_opt(3600 * 14).unwrap());
assert_eq!(utc_plus_max, Timezone::from_tz_string("14:00").unwrap());
let utc_minus_max = Timezone::Offset(FixedOffset::west_opt(3600 * 13 + 60 * 59).unwrap());
assert_eq!(utc_minus_max, Timezone::from_tz_string("-13:59").unwrap());
assert_eq!(
Timezone::Named(Tz::Asia__Shanghai),
Timezone::from_tz_string("Asia/Shanghai").unwrap()
);
assert_eq!(
Timezone::Named(Tz::UTC),
Timezone::from_tz_string("UTC").unwrap()
);
assert!(Timezone::from_tz_string("WORLD_PEACE").is_err());
assert!(Timezone::from_tz_string("A0:01").is_err());
assert!(Timezone::from_tz_string("20:0A").is_err());
assert!(Timezone::from_tz_string(":::::").is_err());
assert!(Timezone::from_tz_string("Asia/London").is_err());
assert!(Timezone::from_tz_string("Unknown").is_err());
}
#[test]
fn test_timezone_to_string() {
assert_eq!("UTC", TimeZone::Named(Tz::UTC).to_string());
assert_eq!("UTC", Timezone::Named(Tz::UTC).to_string());
assert_eq!(
"+01:00",
TimeZone::from_tz_string("01:00")
.unwrap()
.unwrap()
.to_string()
Timezone::from_tz_string("01:00").unwrap().to_string()
);
assert_eq!(
"Asia/Shanghai",
TimeZone::from_tz_string("Asia/Shanghai")
.unwrap()
Timezone::from_tz_string("Asia/Shanghai")
.unwrap()
.to_string()
);

View File

@@ -14,23 +14,24 @@
use std::str::FromStr;
use chrono::offset::Local;
use chrono::{LocalResult, NaiveDateTime, TimeZone};
use chrono_tz::Tz;
use crate::timezone::get_timezone;
pub fn format_utc_datetime(utc: &NaiveDateTime, pattern: &str) -> String {
if let Some(tz) = find_tz_from_env() {
format!("{}", tz.from_utc_datetime(utc).format(pattern))
} else {
format!("{}", Local.from_utc_datetime(utc).format(pattern))
match get_timezone(None) {
crate::Timezone::Offset(offset) => {
offset.from_utc_datetime(utc).format(pattern).to_string()
}
crate::Timezone::Named(tz) => tz.from_utc_datetime(utc).format(pattern).to_string(),
}
}
pub fn local_datetime_to_utc(local: &NaiveDateTime) -> LocalResult<NaiveDateTime> {
if let Some(tz) = find_tz_from_env() {
tz.from_local_datetime(local).map(|x| x.naive_utc())
} else {
Local.from_local_datetime(local).map(|x| x.naive_utc())
match get_timezone(None) {
crate::Timezone::Offset(offset) => offset.from_local_datetime(local).map(|x| x.naive_utc()),
crate::Timezone::Named(tz) => tz.from_local_datetime(local).map(|x| x.naive_utc()),
}
}
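A minimal sketch of the changed behaviour, assuming these helpers remain publicly reachable as `common_time::util::*`: both now resolve the zone through `get_timezone(None)` rather than the `$TZ` environment variable.
use chrono::NaiveDateTime;
use common_time::timezone::set_default_timezone;
use common_time::util::{format_utc_datetime, local_datetime_to_utc};
fn util_example() {
    set_default_timezone(Some("Asia/Shanghai")).unwrap();
    // 1970-01-01 00:00:00 UTC rendered in the configured default zone (+08:00).
    let utc = NaiveDateTime::from_timestamp_opt(0, 0).unwrap();
    assert_eq!("1970-01-01 08:00:00", format_utc_datetime(&utc, "%Y-%m-%d %H:%M:%S"));
    // And the inverse: a local wall-clock value mapped back to UTC.
    let local = NaiveDateTime::from_timestamp_opt(8 * 3600, 0).unwrap();
    assert_eq!(Some(utc), local_datetime_to_utc(&local).single());
}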

View File

@@ -13,6 +13,7 @@
// limitations under the License.
use std::any::Any;
use std::sync::Arc;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
@@ -27,6 +28,19 @@ use table::error::Error as TableError;
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Failed to execute async task"))]
AsyncTaskExecute {
location: Location,
source: Arc<Error>,
},
#[snafu(display("Failed to watch change"))]
WatchAsyncTaskChange {
location: Location,
#[snafu(source)]
error: tokio::sync::watch::error::RecvError,
},
#[snafu(display("Failed to handle heartbeat response"))]
HandleHeartbeatResponse {
location: Location,
@@ -292,7 +306,11 @@ impl ErrorExt for Error {
| MissingWalDirConfig { .. }
| MissingKvBackend { .. } => StatusCode::InvalidArguments,
PayloadNotExist { .. } | Unexpected { .. } => StatusCode::Unexpected,
PayloadNotExist { .. } | Unexpected { .. } | WatchAsyncTaskChange { .. } => {
StatusCode::Unexpected
}
AsyncTaskExecute { source, .. } => source.status_code(),
// TODO(yingwen): Further categorize http error.
ParseAddr { .. }

View File

@@ -41,6 +41,7 @@ use crate::metrics;
use crate::region_server::RegionServer;
pub(crate) mod handler;
pub(crate) mod task_tracker;
pub struct HeartbeatTask {
node_id: u64,

View File

@@ -17,130 +17,75 @@ use common_meta::error::{InvalidHeartbeatResponseSnafu, Result as MetaResult};
use common_meta::heartbeat::handler::{
HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
};
use common_meta::instruction::{
DowngradeRegion, DowngradeRegionReply, Instruction, InstructionReply, OpenRegion, SimpleReply,
};
use common_meta::instruction::{Instruction, InstructionReply};
use common_meta::RegionIdent;
use common_telemetry::error;
use futures::future::BoxFuture;
use snafu::OptionExt;
use store_api::path_utils::region_dir;
use store_api::region_engine::SetReadonlyResponse;
use store_api::region_request::{RegionCloseRequest, RegionOpenRequest, RegionRequest};
use store_api::storage::RegionId;
use crate::error;
mod close_region;
mod downgrade_region;
mod open_region;
mod upgrade_region;
use super::task_tracker::TaskTracker;
use crate::region_server::RegionServer;
/// Handler for [Instruction::OpenRegion] and [Instruction::CloseRegion].
#[derive(Clone)]
pub struct RegionHeartbeatResponseHandler {
region_server: RegionServer,
catchup_tasks: TaskTracker<()>,
}
/// Handler of the instruction.
pub type InstructionHandler =
Box<dyn FnOnce(RegionServer) -> BoxFuture<'static, InstructionReply> + Send>;
Box<dyn FnOnce(HandlerContext) -> BoxFuture<'static, InstructionReply> + Send>;
#[derive(Clone)]
pub struct HandlerContext {
region_server: RegionServer,
catchup_tasks: TaskTracker<()>,
}
impl HandlerContext {
fn region_ident_to_region_id(region_ident: &RegionIdent) -> RegionId {
RegionId::new(region_ident.table_id, region_ident.region_number)
}
}
impl RegionHeartbeatResponseHandler {
/// Returns the [RegionHeartbeatResponseHandler].
pub fn new(region_server: RegionServer) -> Self {
Self { region_server }
Self {
region_server,
catchup_tasks: TaskTracker::new(),
}
}
/// Builds the [InstructionHandler].
fn build_handler(instruction: Instruction) -> MetaResult<InstructionHandler> {
match instruction {
Instruction::OpenRegion(OpenRegion {
region_ident,
region_storage_path,
region_options,
region_wal_options,
skip_wal_replay,
}) => Ok(Box::new(move |region_server| {
Box::pin(async move {
let region_id = Self::region_ident_to_region_id(&region_ident);
// TODO(niebayes): extends region options with region_wal_options.
let _ = region_wal_options;
let request = RegionRequest::Open(RegionOpenRequest {
engine: region_ident.engine,
region_dir: region_dir(&region_storage_path, region_id),
options: region_options,
skip_wal_replay,
});
let result = region_server.handle_request(region_id, request).await;
let success = result.is_ok();
let error = result.as_ref().map_err(|e| e.to_string()).err();
InstructionReply::OpenRegion(SimpleReply {
result: success,
error,
})
})
Instruction::OpenRegion(open_region) => Ok(Box::new(move |handler_context| {
handler_context.handle_open_region_instruction(open_region)
})),
Instruction::CloseRegion(region_ident) => Ok(Box::new(|region_server| {
Box::pin(async move {
let region_id = Self::region_ident_to_region_id(&region_ident);
let request = RegionRequest::Close(RegionCloseRequest {});
let result = region_server.handle_request(region_id, request).await;
match result {
Ok(_) => InstructionReply::CloseRegion(SimpleReply {
result: true,
error: None,
}),
Err(error::Error::RegionNotFound { .. }) => {
InstructionReply::CloseRegion(SimpleReply {
result: true,
error: None,
})
}
Err(err) => InstructionReply::CloseRegion(SimpleReply {
result: false,
error: Some(err.to_string()),
}),
}
})
Instruction::CloseRegion(close_region) => Ok(Box::new(|handler_context| {
handler_context.handle_close_region_instruction(close_region)
})),
Instruction::DowngradeRegion(DowngradeRegion { region_id }) => {
Ok(Box::new(move |region_server| {
Box::pin(async move {
match region_server.set_readonly_gracefully(region_id).await {
Ok(SetReadonlyResponse::Success { last_entry_id }) => {
InstructionReply::DowngradeRegion(DowngradeRegionReply {
last_entry_id,
exists: true,
error: None,
})
}
Ok(SetReadonlyResponse::NotFound) => {
InstructionReply::DowngradeRegion(DowngradeRegionReply {
last_entry_id: None,
exists: false,
error: None,
})
}
Err(err) => InstructionReply::DowngradeRegion(DowngradeRegionReply {
last_entry_id: None,
exists: false,
error: Some(err.to_string()),
}),
}
})
Instruction::DowngradeRegion(downgrade_region) => {
Ok(Box::new(move |handler_context| {
handler_context.handle_downgrade_region_instruction(downgrade_region)
}))
}
Instruction::UpgradeRegion(_) => {
todo!()
}
Instruction::UpgradeRegion(upgrade_region) => Ok(Box::new(move |handler_context| {
handler_context.handle_upgrade_region_instruction(upgrade_region)
})),
Instruction::InvalidateTableIdCache(_) | Instruction::InvalidateTableNameCache(_) => {
InvalidHeartbeatResponseSnafu.fail()
}
}
}
fn region_ident_to_region_id(region_ident: &RegionIdent) -> RegionId {
RegionId::new(region_ident.table_id, region_ident.region_number)
}
}
#[async_trait]
@@ -162,9 +107,14 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
let mailbox = ctx.mailbox.clone();
let region_server = self.region_server.clone();
let catchup_tasks = self.catchup_tasks.clone();
let handler = Self::build_handler(instruction)?;
let _handle = common_runtime::spawn_bg(async move {
let reply = handler(region_server).await;
let reply = handler(HandlerContext {
region_server,
catchup_tasks,
})
.await;
if let Err(e) = mailbox.send((meta, reply)).await {
error!(e; "Failed to send reply to mailbox");
@@ -184,10 +134,12 @@ mod tests {
use common_meta::heartbeat::mailbox::{
HeartbeatMailbox, IncomingMessage, MailboxRef, MessageMeta,
};
use common_meta::instruction::{DowngradeRegion, OpenRegion};
use mito2::config::MitoConfig;
use mito2::engine::MITO_ENGINE_NAME;
use mito2::test_util::{CreateRequestBuilder, TestEnv};
use store_api::region_request::RegionRequest;
use store_api::path_utils::region_dir;
use store_api::region_request::{RegionCloseRequest, RegionRequest};
use store_api::storage::RegionId;
use tokio::sync::mpsc::{self, Receiver};

View File

@@ -0,0 +1,54 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_error::ext::ErrorExt;
use common_meta::instruction::{InstructionReply, SimpleReply};
use common_meta::RegionIdent;
use common_telemetry::warn;
use futures_util::future::BoxFuture;
use store_api::region_request::{RegionCloseRequest, RegionRequest};
use crate::error;
use crate::heartbeat::handler::HandlerContext;
impl HandlerContext {
pub(crate) fn handle_close_region_instruction(
self,
region_ident: RegionIdent,
) -> BoxFuture<'static, InstructionReply> {
Box::pin(async move {
let region_id = Self::region_ident_to_region_id(&region_ident);
let request = RegionRequest::Close(RegionCloseRequest {});
let result = self.region_server.handle_request(region_id, request).await;
match result {
Ok(_) => InstructionReply::CloseRegion(SimpleReply {
result: true,
error: None,
}),
Err(error::Error::RegionNotFound { .. }) => {
warn!("Received a close region instruction from meta, but target region:{region_id} is not found.");
InstructionReply::CloseRegion(SimpleReply {
result: true,
error: None,
})
}
Err(err) => InstructionReply::CloseRegion(SimpleReply {
result: false,
error: Some(err.output_msg()),
}),
}
})
}
}

View File

@@ -0,0 +1,51 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_error::ext::ErrorExt;
use common_meta::instruction::{DowngradeRegion, DowngradeRegionReply, InstructionReply};
use futures_util::future::BoxFuture;
use store_api::region_engine::SetReadonlyResponse;
use crate::heartbeat::handler::HandlerContext;
impl HandlerContext {
pub(crate) fn handle_downgrade_region_instruction(
self,
DowngradeRegion { region_id }: DowngradeRegion,
) -> BoxFuture<'static, InstructionReply> {
Box::pin(async move {
match self.region_server.set_readonly_gracefully(region_id).await {
Ok(SetReadonlyResponse::Success { last_entry_id }) => {
InstructionReply::DowngradeRegion(DowngradeRegionReply {
last_entry_id,
exists: true,
error: None,
})
}
Ok(SetReadonlyResponse::NotFound) => {
InstructionReply::DowngradeRegion(DowngradeRegionReply {
last_entry_id: None,
exists: false,
error: None,
})
}
Err(err) => InstructionReply::DowngradeRegion(DowngradeRegionReply {
last_entry_id: None,
exists: true,
error: Some(err.output_msg()),
}),
}
})
}
}

View File

@@ -0,0 +1,55 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_error::ext::ErrorExt;
use common_meta::instruction::{InstructionReply, OpenRegion, SimpleReply};
use futures_util::future::BoxFuture;
use store_api::path_utils::region_dir;
use store_api::region_request::{RegionOpenRequest, RegionRequest};
use crate::heartbeat::handler::HandlerContext;
impl HandlerContext {
pub(crate) fn handle_open_region_instruction(
self,
OpenRegion {
region_ident,
region_storage_path,
region_options,
region_wal_options,
skip_wal_replay,
}: OpenRegion,
) -> BoxFuture<'static, InstructionReply> {
Box::pin(async move {
let region_id = Self::region_ident_to_region_id(&region_ident);
// TODO(niebayes): extends region options with region_wal_options.
let _ = region_wal_options;
let request = RegionRequest::Open(RegionOpenRequest {
engine: region_ident.engine,
region_dir: region_dir(&region_storage_path, region_id),
options: region_options,
skip_wal_replay,
});
let result = self.region_server.handle_request(region_id, request).await;
let success = result.is_ok();
let error = result.as_ref().map_err(|e| e.output_msg()).err();
InstructionReply::OpenRegion(SimpleReply {
result: success,
error,
})
})
}
}

View File

@@ -0,0 +1,363 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_error::ext::ErrorExt;
use common_meta::instruction::{InstructionReply, UpgradeRegion, UpgradeRegionReply};
use common_telemetry::warn;
use futures_util::future::BoxFuture;
use store_api::region_request::{RegionCatchupRequest, RegionRequest};
use crate::heartbeat::handler::HandlerContext;
use crate::heartbeat::task_tracker::WaitResult;
impl HandlerContext {
pub(crate) fn handle_upgrade_region_instruction(
self,
UpgradeRegion {
region_id,
last_entry_id,
wait_for_replay_timeout,
}: UpgradeRegion,
) -> BoxFuture<'static, InstructionReply> {
Box::pin(async move {
let Some(writable) = self.region_server.is_writable(region_id) else {
return InstructionReply::UpgradeRegion(UpgradeRegionReply {
ready: false,
exists: false,
error: None,
});
};
if writable {
return InstructionReply::UpgradeRegion(UpgradeRegionReply {
ready: true,
exists: true,
error: None,
});
}
let region_server_moved = self.region_server.clone();
// The catchup task is almost zero cost if the underlying region is already writable,
// so it always registers a new catchup task.
let register_result = self
.catchup_tasks
.try_register(
region_id,
Box::pin(async move {
region_server_moved
.handle_request(
region_id,
RegionRequest::Catchup(RegionCatchupRequest {
set_writable: true,
entry_id: last_entry_id,
}),
)
.await?;
Ok(())
}),
)
.await;
if register_result.is_busy() {
warn!("Another catchup task is running for the region: {region_id}");
}
// Returns immediately if the caller didn't ask to wait for replay.
let Some(wait_for_replay_timeout) = wait_for_replay_timeout else {
return InstructionReply::UpgradeRegion(UpgradeRegionReply {
ready: false,
exists: true,
error: None,
});
};
// We don't care whether it's a newly registered task or an already running one.
let mut watcher = register_result.into_watcher();
let result = self
.catchup_tasks
.wait(&mut watcher, wait_for_replay_timeout)
.await;
match result {
WaitResult::Timeout => InstructionReply::UpgradeRegion(UpgradeRegionReply {
ready: false,
exists: true,
error: None,
}),
WaitResult::Finish(Ok(_)) => InstructionReply::UpgradeRegion(UpgradeRegionReply {
ready: true,
exists: true,
error: None,
}),
WaitResult::Finish(Err(err)) => {
InstructionReply::UpgradeRegion(UpgradeRegionReply {
ready: false,
exists: true,
error: Some(err.output_msg()),
})
}
}
})
}
}
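To make the reply semantics concrete, here is a rough sketch of how a caller on the meta-server side might build the instruction and read the reply. The struct and variant names come from this diff, but the `Option<u64>` entry-id type and the calling context are assumptions, not part of the change:
use std::time::Duration;
use common_meta::instruction::{Instruction, InstructionReply, UpgradeRegion};
use store_api::storage::RegionId;
// A `None` timeout makes the datanode answer immediately after registering the
// catchup task; `Some(..)` lets it wait that long for WAL replay before replying.
fn upgrade_instruction(region_id: RegionId, last_entry_id: Option<u64>) -> Instruction {
    Instruction::UpgradeRegion(UpgradeRegion {
        region_id,
        last_entry_id,
        wait_for_replay_timeout: Some(Duration::from_secs(1)),
    })
}
// How the reply maps back onto the states produced by the handler above.
fn describe(reply: &InstructionReply) -> &'static str {
    match reply {
        InstructionReply::UpgradeRegion(r) if r.ready => "caught up; safe to promote to leader",
        InstructionReply::UpgradeRegion(r) if r.exists => "still replaying, or the catchup task failed (see `error`)",
        InstructionReply::UpgradeRegion(_) => "region is not opened on this datanode",
        _ => "unexpected reply kind",
    }
}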
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::time::Duration;
use common_meta::instruction::{InstructionReply, UpgradeRegion};
use store_api::region_engine::RegionRole;
use store_api::storage::RegionId;
use tokio::time::Instant;
use crate::error;
use crate::heartbeat::handler::HandlerContext;
use crate::heartbeat::task_tracker::TaskTracker;
use crate::tests::{mock_region_server, MockRegionEngine};
#[tokio::test]
async fn test_region_not_exist() {
let mut mock_region_server = mock_region_server();
let (mock_engine, _) = MockRegionEngine::new();
mock_region_server.register_engine(mock_engine);
let handler_context = HandlerContext {
region_server: mock_region_server,
catchup_tasks: TaskTracker::new(),
};
let region_id = RegionId::new(1024, 1);
let waits = vec![None, Some(Duration::from_millis(100u64))];
for wait_for_replay_timeout in waits {
let reply = handler_context
.clone()
.handle_upgrade_region_instruction(UpgradeRegion {
region_id,
last_entry_id: None,
wait_for_replay_timeout,
})
.await;
assert_matches!(reply, InstructionReply::UpgradeRegion(_));
if let InstructionReply::UpgradeRegion(reply) = reply {
assert!(!reply.exists);
assert!(reply.error.is_none());
}
}
}
#[tokio::test]
async fn test_region_writable() {
let mock_region_server = mock_region_server();
let region_id = RegionId::new(1024, 1);
let (mock_engine, _) = MockRegionEngine::with_custom_apply_fn(|region_engine| {
region_engine.mock_role = Some(Some(RegionRole::Leader));
region_engine.handle_request_mock_fn = Some(Box::new(|_, _| {
// Should be unreachable.
unreachable!();
}));
});
mock_region_server.register_test_region(region_id, mock_engine);
let handler_context = HandlerContext {
region_server: mock_region_server,
catchup_tasks: TaskTracker::new(),
};
let waits = vec![None, Some(Duration::from_millis(100u64))];
for wait_for_replay_timeout in waits {
let reply = handler_context
.clone()
.handle_upgrade_region_instruction(UpgradeRegion {
region_id,
last_entry_id: None,
wait_for_replay_timeout,
})
.await;
assert_matches!(reply, InstructionReply::UpgradeRegion(_));
if let InstructionReply::UpgradeRegion(reply) = reply {
assert!(reply.ready);
assert!(reply.exists);
assert!(reply.error.is_none());
}
}
}
#[tokio::test]
async fn test_region_not_ready() {
let mock_region_server = mock_region_server();
let region_id = RegionId::new(1024, 1);
let (mock_engine, _) = MockRegionEngine::with_custom_apply_fn(|region_engine| {
// Region is not ready.
region_engine.mock_role = Some(Some(RegionRole::Follower));
region_engine.handle_request_mock_fn = Some(Box::new(|_, _| Ok(0)));
// Note: Don't change.
region_engine.handle_request_delay = Some(Duration::from_secs(100));
});
mock_region_server.register_test_region(region_id, mock_engine);
let handler_context = HandlerContext {
region_server: mock_region_server,
catchup_tasks: TaskTracker::new(),
};
let waits = vec![None, Some(Duration::from_millis(100u64))];
for wait_for_replay_timeout in waits {
let reply = handler_context
.clone()
.handle_upgrade_region_instruction(UpgradeRegion {
region_id,
last_entry_id: None,
wait_for_replay_timeout,
})
.await;
assert_matches!(reply, InstructionReply::UpgradeRegion(_));
if let InstructionReply::UpgradeRegion(reply) = reply {
assert!(!reply.ready);
assert!(reply.exists);
assert!(reply.error.is_none());
}
}
}
#[tokio::test]
async fn test_region_not_ready_with_retry() {
let mock_region_server = mock_region_server();
let region_id = RegionId::new(1024, 1);
let (mock_engine, _) = MockRegionEngine::with_custom_apply_fn(|region_engine| {
// Region is not ready.
region_engine.mock_role = Some(Some(RegionRole::Follower));
region_engine.handle_request_mock_fn = Some(Box::new(|_, _| Ok(0)));
// Note: Don't change.
region_engine.handle_request_delay = Some(Duration::from_millis(300));
});
mock_region_server.register_test_region(region_id, mock_engine);
let waits = vec![
Some(Duration::from_millis(100u64)),
Some(Duration::from_millis(100u64)),
];
let handler_context = HandlerContext {
region_server: mock_region_server,
catchup_tasks: TaskTracker::new(),
};
for wait_for_replay_timeout in waits {
let reply = handler_context
.clone()
.handle_upgrade_region_instruction(UpgradeRegion {
region_id,
last_entry_id: None,
wait_for_replay_timeout,
})
.await;
assert_matches!(reply, InstructionReply::UpgradeRegion(_));
if let InstructionReply::UpgradeRegion(reply) = reply {
assert!(!reply.ready);
assert!(reply.exists);
assert!(reply.error.is_none());
}
}
let timer = Instant::now();
let reply = handler_context
.handle_upgrade_region_instruction(UpgradeRegion {
region_id,
last_entry_id: None,
wait_for_replay_timeout: Some(Duration::from_millis(500)),
})
.await;
assert_matches!(reply, InstructionReply::UpgradeRegion(_));
// Must be less than 300 ms.
assert!(timer.elapsed().as_millis() < 300);
if let InstructionReply::UpgradeRegion(reply) = reply {
assert!(reply.ready);
assert!(reply.exists);
assert!(reply.error.is_none());
}
}
#[tokio::test]
async fn test_region_error() {
let mock_region_server = mock_region_server();
let region_id = RegionId::new(1024, 1);
let (mock_engine, _) = MockRegionEngine::with_custom_apply_fn(|region_engine| {
// Region is not ready.
region_engine.mock_role = Some(Some(RegionRole::Follower));
region_engine.handle_request_mock_fn = Some(Box::new(|_, _| {
error::UnexpectedSnafu {
violated: "mock_error".to_string(),
}
.fail()
}));
// Note: Don't change.
region_engine.handle_request_delay = Some(Duration::from_millis(100));
});
mock_region_server.register_test_region(region_id, mock_engine);
let handler_context = HandlerContext {
region_server: mock_region_server,
catchup_tasks: TaskTracker::new(),
};
let reply = handler_context
.clone()
.handle_upgrade_region_instruction(UpgradeRegion {
region_id,
last_entry_id: None,
wait_for_replay_timeout: None,
})
.await;
assert_matches!(reply, InstructionReply::UpgradeRegion(_));
// It didn't wait for the handler to return, so it has no idea about the error yet.
if let InstructionReply::UpgradeRegion(reply) = reply {
assert!(!reply.ready);
assert!(reply.exists);
assert!(reply.error.is_none());
}
let reply = handler_context
.clone()
.handle_upgrade_region_instruction(UpgradeRegion {
region_id,
last_entry_id: None,
wait_for_replay_timeout: Some(Duration::from_millis(200)),
})
.await;
assert_matches!(reply, InstructionReply::UpgradeRegion(_));
if let InstructionReply::UpgradeRegion(reply) = reply {
assert!(!reply.ready);
assert!(reply.exists);
assert!(reply.error.is_some());
assert!(reply.error.unwrap().contains("mock_error"));
}
}
}

View File

@@ -0,0 +1,279 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use futures_util::future::BoxFuture;
use snafu::ResultExt;
use store_api::storage::RegionId;
use tokio::sync::watch::{self, Receiver};
use tokio::sync::RwLock;
use crate::error::{self, Error, Result};
/// The state of an async task.
#[derive(Debug, Default, Clone)]
pub(crate) enum TaskState<T: Send + Sync + Clone> {
Error(Arc<Error>),
#[default]
Running,
Done(T),
}
pub(crate) type TaskWatcher<T> = Receiver<TaskState<T>>;
async fn wait<T: Send + Sync + Clone>(watcher: &mut TaskWatcher<T>) -> Result<T> {
loop {
watcher
.changed()
.await
.context(error::WatchAsyncTaskChangeSnafu)?;
let r = &*watcher.borrow();
match r {
TaskState::Error(err) => return Err(err.clone()).context(error::AsyncTaskExecuteSnafu),
TaskState::Running => {}
TaskState::Done(value) => return Ok(value.clone()),
}
}
}
/// The running async task.
pub(crate) struct Task<T: Send + Sync + Clone> {
watcher: TaskWatcher<T>,
}
pub(crate) struct TaskTrackerInner<T: Send + Sync + Clone> {
state: HashMap<RegionId, Task<T>>,
}
impl<T: Send + Sync + Clone> Default for TaskTrackerInner<T> {
fn default() -> Self {
TaskTrackerInner {
state: HashMap::new(),
}
}
}
/// Tracks the long-running async tasks.
#[derive(Clone)]
pub(crate) struct TaskTracker<T: Send + Sync + Clone> {
inner: Arc<RwLock<TaskTrackerInner<T>>>,
}
/// The registration result of an async task.
pub(crate) enum RegisterResult<T: Send + Sync + Clone> {
// The watcher of the running task.
Busy(TaskWatcher<T>),
// The watcher of the newly registered task.
Running(TaskWatcher<T>),
}
impl<T: Send + Sync + Clone> RegisterResult<T> {
pub(crate) fn into_watcher(self) -> TaskWatcher<T> {
match self {
RegisterResult::Busy(inner) => inner,
RegisterResult::Running(inner) => inner,
}
}
/// Returns true if it's [RegisterResult::Busy].
pub(crate) fn is_busy(&self) -> bool {
matches!(self, RegisterResult::Busy(_))
}
#[cfg(test)]
/// Returns true if it's [RegisterResult::Running].
pub(crate) fn is_running(&self) -> bool {
matches!(self, RegisterResult::Running(_))
}
}
/// The result of waiting.
pub(crate) enum WaitResult<T> {
Timeout,
Finish(Result<T>),
}
#[cfg(test)]
impl<T> WaitResult<T> {
/// Returns true if it's [WaitResult::Timeout].
pub(crate) fn is_timeout(&self) -> bool {
matches!(self, WaitResult::Timeout)
}
/// Returns the inner result if it's [WaitResult::Finish], otherwise `None`.
pub(crate) fn into_finish(self) -> Option<Result<T>> {
match self {
WaitResult::Timeout => None,
WaitResult::Finish(result) => Some(result),
}
}
}
impl<T: Send + Sync + Clone + 'static> TaskTracker<T> {
/// Returns an empty [TaskTracker].
pub(crate) fn new() -> Self {
Self {
inner: Arc::new(RwLock::new(TaskTrackerInner::default())),
}
}
/// Waits on a [TaskWatcher] with a timeout and returns a [WaitResult].
pub(crate) async fn wait(
&self,
watcher: &mut TaskWatcher<T>,
timeout: Duration,
) -> WaitResult<T> {
match tokio::time::timeout(timeout, wait(watcher)).await {
Ok(result) => WaitResult::Finish(result),
Err(_) => WaitResult::Timeout,
}
}
/// Tries to register a new async task; returns [RegisterResult::Busy] if a previous task is still running.
pub(crate) async fn try_register(
&self,
region_id: RegionId,
fut: BoxFuture<'static, Result<T>>,
) -> RegisterResult<T> {
let mut inner = self.inner.write().await;
if let Some(task) = inner.state.get(&region_id) {
RegisterResult::Busy(task.watcher.clone())
} else {
let moved_inner = self.inner.clone();
let (tx, rx) = watch::channel(TaskState::<T>::Running);
common_runtime::spawn_bg(async move {
match fut.await {
Ok(result) => {
let _ = tx.send(TaskState::Done(result));
}
Err(err) => {
let _ = tx.send(TaskState::Error(Arc::new(err)));
}
};
moved_inner.write().await.state.remove(&region_id);
});
inner.state.insert(
region_id,
Task {
watcher: rx.clone(),
},
);
RegisterResult::Running(rx.clone())
}
}
#[cfg(test)]
async fn watcher(&self, region_id: RegionId) -> Option<TaskWatcher<T>> {
self.inner
.read()
.await
.state
.get(&region_id)
.map(|task| task.watcher.clone())
}
}
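Putting the pieces together, a caller inside the same crate would drive the tracker roughly like this (illustrative sketch only; the real caller is the upgrade-region handler shown earlier):
use std::time::Duration;
use store_api::storage::RegionId;
use crate::heartbeat::task_tracker::{TaskTracker, WaitResult};
async fn run_catchup(tracker: &TaskTracker<()>, region_id: RegionId) {
    // Register the long-running task; if one is already in flight for this
    // region we only get a watcher on it instead of spawning another.
    let register_result = tracker
        .try_register(region_id, Box::pin(async { Ok(()) }))
        .await;
    // Wait up to one second on whichever task the watcher points at.
    let mut watcher = register_result.into_watcher();
    match tracker.wait(&mut watcher, Duration::from_secs(1)).await {
        WaitResult::Finish(Ok(_)) => { /* task finished successfully */ }
        WaitResult::Finish(Err(_err)) => { /* task failed; the error is shared via `Arc` */ }
        WaitResult::Timeout => { /* still running; report "not ready" and let the caller retry */ }
    }
}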
#[cfg(test)]
mod tests {
use std::time::Duration;
use store_api::storage::RegionId;
use tokio::sync::oneshot;
use crate::heartbeat::task_tracker::{wait, TaskTracker};
#[derive(Debug, Clone, PartialEq, Eq)]
struct TestResult {
value: i32,
}
#[tokio::test]
async fn test_async_task_tracker_register() {
let tracker = TaskTracker::<TestResult>::new();
let region_id = RegionId::new(1024, 1);
let (tx, rx) = oneshot::channel::<()>();
let result = tracker
.try_register(
region_id,
Box::pin(async move {
let _ = rx.await;
Ok(TestResult { value: 1024 })
}),
)
.await;
assert!(result.is_running());
let result = tracker
.try_register(
region_id,
Box::pin(async move { Ok(TestResult { value: 1023 }) }),
)
.await;
assert!(result.is_busy());
let mut watcher = tracker.watcher(region_id).await.unwrap();
// Triggers first future return.
tx.send(()).unwrap();
assert_eq!(
TestResult { value: 1024 },
wait(&mut watcher).await.unwrap()
);
let result = tracker
.try_register(
region_id,
Box::pin(async move { Ok(TestResult { value: 1022 }) }),
)
.await;
assert!(result.is_running());
}
#[tokio::test]
async fn test_async_task_tracker_wait_timeout() {
let tracker = TaskTracker::<TestResult>::new();
let region_id = RegionId::new(1024, 1);
let (tx, rx) = oneshot::channel::<()>();
let result = tracker
.try_register(
region_id,
Box::pin(async move {
let _ = rx.await;
Ok(TestResult { value: 1024 })
}),
)
.await;
let mut watcher = result.into_watcher();
let result = tracker.wait(&mut watcher, Duration::from_millis(100)).await;
assert!(result.is_timeout());
// Triggers first future return.
tx.send(()).unwrap();
let result = tracker
.wait(&mut watcher, Duration::from_millis(100))
.await
.into_finish()
.unwrap()
.unwrap();
assert_eq!(TestResult { value: 1024 }, result);
assert!(tracker.watcher(region_id).await.is_none());
}
}

View File

@@ -139,6 +139,16 @@ impl RegionServer {
.collect()
}
pub fn is_writable(&self, region_id: RegionId) -> Option<bool> {
// TODO(weny): Find a better way.
self.inner.region_map.get(&region_id).and_then(|engine| {
engine.role(region_id).map(|role| match role {
RegionRole::Follower => false,
RegionRole::Leader => true,
})
})
}
pub fn set_writable(&self, region_id: RegionId, writable: bool) -> Result<()> {
let engine = self
.inner
@@ -178,6 +188,14 @@ impl RegionServer {
pub async fn stop(&self) -> Result<()> {
self.inner.stop().await
}
#[cfg(test)]
/// Registers a region for test purposes.
pub(crate) fn register_test_region(&self, region_id: RegionId, engine: RegionEngineRef) {
self.inner
.region_map
.insert(region_id, RegionEngineWithStatus::Ready(engine));
}
}
#[async_trait]

View File

@@ -14,6 +14,7 @@
use std::any::Any;
use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use common_error::ext::BoxedError;
@@ -93,7 +94,9 @@ pub type MockRequestHandler =
pub struct MockRegionEngine {
sender: Sender<(RegionId, RegionRequest)>,
handle_request_mock_fn: Option<MockRequestHandler>,
pub(crate) handle_request_delay: Option<Duration>,
pub(crate) handle_request_mock_fn: Option<MockRequestHandler>,
pub(crate) mock_role: Option<Option<RegionRole>>,
}
impl MockRegionEngine {
@@ -102,8 +105,10 @@ impl MockRegionEngine {
(
Arc::new(Self {
handle_request_delay: None,
sender: tx,
handle_request_mock_fn: None,
mock_role: None,
}),
rx,
)
@@ -116,12 +121,31 @@ impl MockRegionEngine {
(
Arc::new(Self {
handle_request_delay: None,
sender: tx,
handle_request_mock_fn: Some(mock_fn),
mock_role: None,
}),
rx,
)
}
pub fn with_custom_apply_fn<F>(apply: F) -> (Arc<Self>, Receiver<(RegionId, RegionRequest)>)
where
F: FnOnce(&mut MockRegionEngine),
{
let (tx, rx) = tokio::sync::mpsc::channel(8);
let mut region_engine = Self {
handle_request_delay: None,
sender: tx,
handle_request_mock_fn: None,
mock_role: None,
};
apply(&mut region_engine);
(Arc::new(region_engine), rx)
}
}
#[async_trait::async_trait]
@@ -135,6 +159,9 @@ impl RegionEngine for MockRegionEngine {
region_id: RegionId,
request: RegionRequest,
) -> Result<AffectedRows, BoxedError> {
if let Some(delay) = self.handle_request_delay {
tokio::time::sleep(delay).await;
}
if let Some(mock_fn) = &self.handle_request_mock_fn {
return mock_fn(region_id, request).map_err(BoxedError::new);
};
@@ -175,6 +202,9 @@ impl RegionEngine for MockRegionEngine {
}
fn role(&self, _region_id: RegionId) -> Option<RegionRole> {
if let Some(role) = self.mock_role {
return role;
}
Some(RegionRole::Leader)
}
}

View File

@@ -120,11 +120,13 @@ define_time_with_unit!(Nanosecond, i64);
#[cfg(test)]
mod tests {
use common_time::timezone::set_default_timezone;
use super::*;
#[test]
fn test_to_serde_json_value() {
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("Asia/Shanghai")).unwrap();
let time = TimeSecond::new(123);
let val = serde_json::Value::from(time);
match val {

View File

@@ -122,11 +122,13 @@ define_timestamp_with_unit!(Nanosecond);
#[cfg(test)]
mod tests {
use common_time::timezone::set_default_timezone;
use super::*;
#[test]
fn test_to_serde_json_value() {
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("Asia/Shanghai")).unwrap();
let ts = TimestampSecond::new(123);
let val = serde_json::Value::from(ts);
match val {

View File

@@ -176,6 +176,7 @@ mod tests {
use common_base::bytes::StringBytes;
use common_time::time::Time;
use common_time::timezone::set_default_timezone;
use common_time::{Date, DateTime, Timestamp};
use ordered_float::OrderedFloat;
@@ -213,7 +214,7 @@ mod tests {
#[test]
fn test_cast_with_opt() {
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("Asia/Shanghai")).unwrap();
// non-strict mode
let cast_option = CastOption { strict: false };
let src_value = Value::Int8(-1);

View File

@@ -101,6 +101,7 @@ impl LogicalPrimitiveType for DateType {
#[cfg(test)]
mod tests {
use common_base::bytes::StringBytes;
use common_time::timezone::set_default_timezone;
use common_time::Timestamp;
use super::*;
@@ -108,7 +109,7 @@ mod tests {
// $TZ doesn't take effect
#[test]
fn test_date_cast() {
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("Asia/Shanghai")).unwrap();
// timestamp -> date
let ts = Value::Timestamp(Timestamp::from_str("2000-01-01 08:00:01").unwrap());
let date = ConcreteDataType::date_datatype().try_cast(ts).unwrap();

View File

@@ -101,6 +101,7 @@ impl LogicalPrimitiveType for DateTimeType {
#[cfg(test)]
mod tests {
use common_time::timezone::set_default_timezone;
use common_time::Timestamp;
use super::*;
@@ -113,7 +114,7 @@ mod tests {
assert_eq!(dt, Value::DateTime(DateTime::from(1000)));
// cast from String
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("Asia/Shanghai")).unwrap();
let val = Value::String("1970-01-01 00:00:00+0800".into());
let dt = ConcreteDataType::datetime_datatype().try_cast(val).unwrap();
assert_eq!(

View File

@@ -203,6 +203,7 @@ impl_data_type_for_timestamp!(Microsecond);
#[cfg(test)]
mod tests {
use common_time::timezone::set_default_timezone;
use common_time::{Date, DateTime};
use super::*;
@@ -230,7 +231,7 @@ mod tests {
// $TZ doesn't take effect
#[test]
fn test_timestamp_cast() {
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("Asia/Shanghai")).unwrap();
// String -> TimestampSecond
let s = Value::String("2021-01-01 01:02:03".to_string().into());
let ts = ConcreteDataType::timestamp_second_datatype()

View File

@@ -827,8 +827,10 @@ impl TryFrom<ScalarValue> for Value {
ScalarValue::DurationNanosecond(d) => d
.map(|x| Value::Duration(Duration::new(x, TimeUnit::Nanosecond)))
.unwrap_or(Value::Null),
ScalarValue::Decimal128(_, _, _)
| ScalarValue::Decimal256(_, _, _)
ScalarValue::Decimal128(v, p, s) => v
.map(|v| Value::Decimal128(Decimal128::new(v, p, s)))
.unwrap_or(Value::Null),
ScalarValue::Decimal256(_, _, _)
| ScalarValue::Struct(_, _)
| ScalarValue::Dictionary(_, _) => {
return error::UnsupportedArrowTypeSnafu {
@@ -1190,6 +1192,7 @@ impl<'a> ValueRef<'a> {
#[cfg(test)]
mod tests {
use arrow::datatypes::DataType as ArrowDataType;
use common_time::timezone::set_default_timezone;
use num_traits::Float;
use super::*;
@@ -1474,11 +1477,14 @@ mod tests {
ScalarValue::DurationNanosecond(None).try_into().unwrap()
);
let result: Result<Value> = ScalarValue::Decimal128(Some(1), 0, 0).try_into();
assert!(result
.unwrap_err()
.to_string()
.contains("Unsupported arrow data type, type: Decimal128(0, 0)"));
assert_eq!(
Value::Decimal128(Decimal128::new(1, 38, 10)),
ScalarValue::Decimal128(Some(1), 38, 10).try_into().unwrap()
);
assert_eq!(
Value::Null,
ScalarValue::Decimal128(None, 0, 0).try_into().unwrap()
);
}
#[test]
@@ -1875,7 +1881,7 @@ mod tests {
#[test]
fn test_display() {
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("Asia/Shanghai")).unwrap();
assert_eq!(Value::Null.to_string(), "Null");
assert_eq!(Value::UInt8(8).to_string(), "8");
assert_eq!(Value::UInt16(16).to_string(), "16");

View File

@@ -26,6 +26,7 @@ mod tests {
use arrow::array::{Array, PrimitiveArray};
use arrow_array::ArrayRef;
use common_time::timezone::set_default_timezone;
use common_time::DateTime;
use super::*;
@@ -37,7 +38,7 @@ mod tests {
#[test]
fn test_datetime_vector() {
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("Asia/Shanghai")).unwrap();
let v = DateTimeVector::new(PrimitiveArray::from(vec![1000, 2000, 3000]));
assert_eq!(ConcreteDataType::datetime_datatype(), v.data_type());
assert_eq!(3, v.len());

View File

@@ -32,6 +32,7 @@ use crate::service_config::{
pub struct FrontendOptions {
pub mode: Mode,
pub node_id: Option<String>,
pub default_timezone: Option<String>,
pub heartbeat: HeartbeatOptions,
pub http: HttpOptions,
pub grpc: GrpcOptions,
@@ -53,6 +54,7 @@ impl Default for FrontendOptions {
Self {
mode: Mode::Standalone,
node_id: None,
default_timezone: None,
heartbeat: HeartbeatOptions::frontend_default(),
http: HttpOptions::default(),
grpc: GrpcOptions::default(),

View File

@@ -18,10 +18,14 @@ use std::sync::Arc;
use api::v1::region::{QueryRequest, RegionRequest, RegionResponse};
use async_trait::async_trait;
use client::region::check_response_header;
use common_catalog::consts::METRIC_ENGINE;
use common_error::ext::BoxedError;
use common_meta::datanode_manager::{AffectedRows, Datanode, DatanodeManager, DatanodeRef};
use common_meta::ddl::{TableMetadata, TableMetadataAllocator, TableMetadataAllocatorContext};
use common_meta::error::{self as meta_error, Result as MetaResult, UnsupportedSnafu};
use common_meta::key::table_route::{
LogicalTableRouteValue, PhysicalTableRouteValue, TableRouteValue,
};
use common_meta::peer::Peer;
use common_meta::rpc::ddl::CreateTableTask;
use common_meta::rpc::router::{Region, RegionRoute};
@@ -34,7 +38,7 @@ use common_telemetry::{debug, info, tracing};
use datanode::region_server::RegionServer;
use servers::grpc::region_server::RegionServerHandler;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::{RegionId, TableId};
use store_api::storage::{RegionId, RegionNumber, TableId};
use crate::error::{InvalidRegionRequestSnafu, InvokeRegionServerSnafu, Result};
@@ -151,17 +155,29 @@ impl StandaloneTableMetadataAllocator {
};
Ok(table_id)
}
fn create_wal_options(
&self,
table_route: &TableRouteValue,
) -> MetaResult<HashMap<RegionNumber, String>> {
match table_route {
TableRouteValue::Physical(x) => {
let region_numbers = x
.region_routes
.iter()
.map(|route| route.region.id.region_number())
.collect();
allocate_region_wal_options(region_numbers, &self.wal_options_allocator)
}
TableRouteValue::Logical(_) => Ok(HashMap::new()),
}
}
}
#[async_trait]
impl TableMetadataAllocator for StandaloneTableMetadataAllocator {
async fn create(
&self,
_ctx: &TableMetadataAllocatorContext,
task: &CreateTableTask,
) -> MetaResult<TableMetadata> {
let table_id = self.allocate_table_id(task).await?;
fn create_table_route(table_id: TableId, task: &CreateTableTask) -> TableRouteValue {
if task.create_table.engine == METRIC_ENGINE {
TableRouteValue::Logical(LogicalTableRouteValue {})
} else {
let region_routes = task
.partitions
.iter()
@@ -182,13 +198,22 @@ impl TableMetadataAllocator for StandaloneTableMetadataAllocator {
}
})
.collect::<Vec<_>>();
TableRouteValue::Physical(PhysicalTableRouteValue::new(region_routes))
}
}
let region_numbers = region_routes
.iter()
.map(|route| route.region.id.region_number())
.collect();
let region_wal_options =
allocate_region_wal_options(region_numbers, &self.wal_options_allocator)?;
#[async_trait]
impl TableMetadataAllocator for StandaloneTableMetadataAllocator {
async fn create(
&self,
_ctx: &TableMetadataAllocatorContext,
task: &CreateTableTask,
) -> MetaResult<TableMetadata> {
let table_id = self.allocate_table_id(task).await?;
let table_route = create_table_route(table_id, task);
let region_wal_options = self.create_wal_options(&table_route)?;
debug!(
"Allocated region wal options {:?} for table {}",
@@ -197,8 +222,8 @@ impl TableMetadataAllocator for StandaloneTableMetadataAllocator {
Ok(TableMetadata {
table_id,
region_routes,
region_wal_options: HashMap::default(),
table_route,
region_wal_options,
})
}
}

View File

@@ -192,8 +192,11 @@ pub async fn metasrv_builder(
let etcd_client = create_etcd_client(opts).await?;
let kv_backend = {
let etcd_backend = EtcdStore::with_etcd_client(etcd_client.clone());
if let Some(prefix) = opts.store_key_prefix.clone() {
Arc::new(ChrootKvBackend::new(prefix.into_bytes(), etcd_backend))
if !opts.store_key_prefix.is_empty() {
Arc::new(ChrootKvBackend::new(
opts.store_key_prefix.clone().into_bytes(),
etcd_backend,
))
} else {
etcd_backend
}

View File

@@ -35,14 +35,14 @@ pub struct EtcdElection {
is_leader: AtomicBool,
infancy: AtomicBool,
leader_watcher: broadcast::Sender<LeaderChangeMessage>,
store_key_prefix: Option<String>,
store_key_prefix: String,
}
impl EtcdElection {
pub async fn with_endpoints<E, S>(
leader_value: E,
endpoints: S,
store_key_prefix: Option<String>,
store_key_prefix: String,
) -> Result<ElectionRef>
where
E: AsRef<str>,
@@ -58,7 +58,7 @@ impl EtcdElection {
pub async fn with_etcd_client<E>(
leader_value: E,
client: Client,
store_key_prefix: Option<String>,
store_key_prefix: String,
) -> Result<ElectionRef>
where
E: AsRef<str>,
@@ -105,9 +105,10 @@ impl EtcdElection {
}
fn election_key(&self) -> String {
match &self.store_key_prefix {
Some(prefix) => format!("{}{}", prefix, ELECTION_KEY),
None => ELECTION_KEY.to_string(),
if self.store_key_prefix.is_empty() {
ELECTION_KEY.to_string()
} else {
format!("{}{}", self.store_key_prefix, ELECTION_KEY)
}
}
}
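
These diffs replace Option&lt;String&gt; with a plain String for `store_key_prefix`, with the empty string taking over the role of `None`. A small runnable sketch of the resulting key-building convention (the ELECTION_KEY value below is made up for illustration; the real constant lives in the metasrv crate):

// Hypothetical constant; stands in for the real election key.
const ELECTION_KEY: &str = "__meta_election";

// With `store_key_prefix` as a plain String, the empty string means "no prefix".
fn election_key(store_key_prefix: &str) -> String {
    if store_key_prefix.is_empty() {
        ELECTION_KEY.to_string()
    } else {
        format!("{}{}", store_key_prefix, ELECTION_KEY)
    }
}

fn main() {
    assert_eq!(election_key(""), "__meta_election");
    assert_eq!(election_key("/cluster-a/"), "/cluster-a/__meta_election");
}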

View File

@@ -32,6 +32,12 @@ use crate::pubsub::Message;
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Another migration procedure is running for region: {}", region_id))]
MigrationRunning {
location: Location,
region_id: RegionId,
},
#[snafu(display("The region migration procedure aborted, reason: {}", reason))]
MigrationAbort { location: Location, reason: String },
@@ -675,7 +681,8 @@ impl ErrorExt for Error {
| Error::TableIdChanged { .. }
| Error::RegionOpeningRace { .. }
| Error::RegionRouteNotFound { .. }
| Error::MigrationAbort { .. } => StatusCode::Unexpected,
| Error::MigrationAbort { .. }
| Error::MigrationRunning { .. } => StatusCode::Unexpected,
Error::TableNotFound { .. } => StatusCode::TableNotFound,
Error::InvalidateTableCache { source, .. } => source.status_code(),
Error::RequestDatanode { source, .. } => source.status_code(),

View File

@@ -22,6 +22,7 @@ use crate::handler::{HandleControl, HeartbeatAccumulator, HeartbeatHandler};
use crate::keys::{LeaseKey, LeaseValue};
use crate::metasrv::Context;
/// Keeps [Datanode] leases
pub struct KeepLeaseHandler;
#[async_trait::async_trait]

View File

@@ -104,6 +104,7 @@ mod test {
use std::sync::Arc;
use common_meta::distributed_time_constants;
use common_meta::key::table_route::TableRouteValue;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
@@ -161,7 +162,11 @@ mod test {
let table_metadata_manager = keeper.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes, HashMap::default())
.create_table_metadata(
table_info,
TableRouteValue::physical(region_routes),
HashMap::default(),
)
.await
.unwrap();
@@ -303,7 +308,11 @@ mod test {
let table_metadata_manager = keeper.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes, HashMap::default())
.create_table_metadata(
table_info,
TableRouteValue::physical(region_routes),
HashMap::default(),
)
.await
.unwrap();

View File

@@ -25,14 +25,11 @@ use crate::error::Result;
#[derive(Clone)]
pub struct EtcdLock {
client: Client,
store_key_prefix: Option<String>,
store_key_prefix: String,
}
impl EtcdLock {
pub async fn with_endpoints<E, S>(
endpoints: S,
store_key_prefix: Option<String>,
) -> Result<DistLockRef>
pub async fn with_endpoints<E, S>(endpoints: S, store_key_prefix: String) -> Result<DistLockRef>
where
E: AsRef<str>,
S: AsRef<[E]>,
@@ -44,10 +41,7 @@ impl EtcdLock {
Self::with_etcd_client(client, store_key_prefix)
}
pub fn with_etcd_client(
client: Client,
store_key_prefix: Option<String>,
) -> Result<DistLockRef> {
pub fn with_etcd_client(client: Client, store_key_prefix: String) -> Result<DistLockRef> {
Ok(Arc::new(EtcdLock {
client,
store_key_prefix,
@@ -55,13 +49,12 @@ impl EtcdLock {
}
fn lock_key(&self, key: Vec<u8>) -> Vec<u8> {
match &self.store_key_prefix {
Some(prefix) => {
let mut prefix = prefix.as_bytes().to_vec();
prefix.extend_from_slice(&key);
prefix
}
None => key,
if self.store_key_prefix.is_empty() {
key
} else {
let mut prefix = self.store_key_prefix.as_bytes().to_vec();
prefix.extend_from_slice(&key);
prefix
}
}
}

View File

@@ -75,7 +75,7 @@ pub struct MetaSrvOptions {
pub data_home: String,
pub wal: WalConfig,
pub export_metrics: ExportMetricsOption,
pub store_key_prefix: Option<String>,
pub store_key_prefix: String,
}
impl Default for MetaSrvOptions {
@@ -102,7 +102,7 @@ impl Default for MetaSrvOptions {
data_home: METASRV_HOME.to_string(),
wal: WalConfig::default(),
export_metrics: ExportMetricsOption::default(),
store_key_prefix: None,
store_key_prefix: String::new(),
}
}
}

View File

@@ -207,7 +207,7 @@ mod tests {
.unwrap();
let should_downgraded = table_route_value
.region_routes
.region_routes()
.iter()
.find(|route| route.region.id.region_number() == failed_region.region_number)
.unwrap();

View File

@@ -85,7 +85,7 @@ impl UpdateRegionMetadata {
.context(error::TableMetadataManagerSnafu)?
.context(TableRouteNotFoundSnafu { table_id })?;
let mut new_region_routes = table_route_value.region_routes.clone();
let mut new_region_routes = table_route_value.region_routes().clone();
for region_route in new_region_routes.iter_mut() {
if region_route.region.id.region_number() == failed_region.region_number {
@@ -233,7 +233,8 @@ mod tests {
.unwrap()
.unwrap()
.into_inner()
.region_routes
.region_routes()
.clone()
}
// Original region routes:
@@ -395,8 +396,8 @@ mod tests {
.unwrap()
.into_inner();
let peers = &extract_all_peers(&table_route_value.region_routes);
let actual = &table_route_value.region_routes;
let peers = &extract_all_peers(table_route_value.region_routes());
let actual = table_route_value.region_routes();
let expected = &vec![
new_region_route(1, peers, 2),
new_region_route(2, peers, 3),
@@ -415,7 +416,7 @@ mod tests {
.unwrap()
.into_inner();
let map = region_distribution(&table_route_value.region_routes).unwrap();
let map = region_distribution(table_route_value.region_routes()).unwrap();
assert_eq!(map.len(), 2);
assert_eq!(map.get(&2), Some(&vec![1, 3]));
assert_eq!(map.get(&3), Some(&vec![2, 4]));

View File

@@ -13,6 +13,9 @@
// limitations under the License.
pub(crate) mod downgrade_leader_region;
// TODO(weny): remove it.
#[allow(dead_code)]
pub(crate) mod manager;
pub(crate) mod migration_abort;
pub(crate) mod migration_end;
pub(crate) mod migration_start;
@@ -123,6 +126,7 @@ pub trait ContextFactory {
}
/// Default implementation.
#[derive(Clone)]
pub struct ContextFactoryImpl {
volatile_ctx: VolatileContext,
table_metadata_manager: TableMetadataManagerRef,

View File

@@ -0,0 +1,452 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::fmt::Display;
use std::sync::{Arc, RwLock};
use common_meta::key::table_route::TableRouteValue;
use common_meta::peer::Peer;
use common_meta::rpc::router::RegionRoute;
use common_meta::ClusterId;
use common_procedure::{watcher, ProcedureManagerRef, ProcedureWithId};
use common_telemetry::{error, info};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::RegionId;
use crate::error::{self, Result};
use crate::procedure::region_migration::{
ContextFactoryImpl, PersistentContext, RegionMigrationProcedure,
};
/// Manager of region migration procedures.
pub(crate) struct RegionMigrationManager {
procedure_manager: ProcedureManagerRef,
running_procedures: Arc<RwLock<HashMap<RegionId, RegionMigrationProcedureTask>>>,
context_factory: ContextFactoryImpl,
}
/// The guard for a running [RegionMigrationProcedureTask].
pub(crate) struct RegionMigrationProcedureGuard {
region_id: RegionId,
running_procedures: Arc<RwLock<HashMap<RegionId, RegionMigrationProcedureTask>>>,
}
impl Drop for RegionMigrationProcedureGuard {
fn drop(&mut self) {
self.running_procedures
.write()
.unwrap()
.remove(&self.region_id);
}
}
#[derive(Debug, Clone)]
pub(crate) struct RegionMigrationProcedureTask {
cluster_id: ClusterId,
region_id: RegionId,
from_peer: Peer,
to_peer: Peer,
}
impl Display for RegionMigrationProcedureTask {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"cluster: {}, region: {}, from_peer: {}, to_peer: {}",
self.cluster_id, self.region_id, self.from_peer, self.to_peer
)
}
}
impl From<RegionMigrationProcedureTask> for PersistentContext {
fn from(
RegionMigrationProcedureTask {
cluster_id,
region_id,
from_peer,
to_peer,
}: RegionMigrationProcedureTask,
) -> Self {
PersistentContext {
cluster_id,
from_peer,
to_peer,
region_id,
}
}
}
impl RegionMigrationManager {
/// Returns a new [RegionMigrationManager].
pub(crate) fn new(
procedure_manager: ProcedureManagerRef,
context_factory: ContextFactoryImpl,
) -> Self {
Self {
procedure_manager,
running_procedures: Arc::new(RwLock::new(HashMap::new())),
context_factory,
}
}
/// Registers the loader of [RegionMigrationProcedure] with the `ProcedureManager`.
pub(crate) fn try_start(&self) -> Result<()> {
let context_factory = self.context_factory.clone();
self.procedure_manager
.register_loader(
RegionMigrationProcedure::TYPE_NAME,
Box::new(move |json| {
let context_factory = context_factory.clone();
RegionMigrationProcedure::from_json(json, context_factory)
.map(|p| Box::new(p) as _)
}),
)
.context(error::RegisterProcedureLoaderSnafu {
type_name: RegionMigrationProcedure::TYPE_NAME,
})
}
fn insert_running_procedure(
&self,
task: &RegionMigrationProcedureTask,
) -> Option<RegionMigrationProcedureGuard> {
let mut procedures = self.running_procedures.write().unwrap();
match procedures.entry(task.region_id) {
Entry::Occupied(_) => None,
Entry::Vacant(v) => {
v.insert(task.clone());
Some(RegionMigrationProcedureGuard {
region_id: task.region_id,
running_procedures: self.running_procedures.clone(),
})
}
}
}
fn verify_task(&self, task: &RegionMigrationProcedureTask) -> Result<()> {
if task.to_peer.id == task.from_peer.id {
return error::InvalidArgumentsSnafu {
err_msg: "The `from_peer_id` can't equal `to_peer_id`",
}
.fail();
}
Ok(())
}
async fn retrieve_table_route(&self, region_id: RegionId) -> Result<TableRouteValue> {
let table_route = self
.context_factory
.table_metadata_manager
.table_route_manager()
.get(region_id.table_id())
.await
.context(error::TableMetadataManagerSnafu)?
.context(error::TableRouteNotFoundSnafu {
table_id: region_id.table_id(),
})?;
Ok(table_route.into_inner())
}
/// Verifies that the table route of the region to migrate is a physical one.
fn verify_table_route(
&self,
table_route: &TableRouteValue,
task: &RegionMigrationProcedureTask,
) -> Result<()> {
if !table_route.is_physical() {
return error::UnexpectedSnafu {
violated: format!(
"Trying to execute region migration on the logical table, task {task}"
),
}
.fail();
}
Ok(())
}
/// Returns true if the region has been migrated.
fn has_migrated(
&self,
region_route: &RegionRoute,
task: &RegionMigrationProcedureTask,
) -> Result<bool> {
let leader_peer = region_route
.leader_peer
.as_ref()
.context(error::UnexpectedSnafu {
violated: "Region route leader peer is not found",
})?;
Ok(leader_peer.id == task.to_peer.id)
}
/// Returns an error if `leader_peer` is not the `from_peer`.
fn verify_region_leader_peer(
&self,
region_route: &RegionRoute,
task: &RegionMigrationProcedureTask,
) -> Result<()> {
let leader_peer = region_route
.leader_peer
.as_ref()
.context(error::UnexpectedSnafu {
violated: "Region route leader peer is not found",
})?;
ensure!(
leader_peer.id == task.from_peer.id,
error::InvalidArgumentsSnafu {
err_msg: "Invalid region migration `from_peer` argument"
}
);
Ok(())
}
/// Submits a new region migration procedure.
pub(crate) async fn submit_procedure(&self, task: RegionMigrationProcedureTask) -> Result<()> {
let Some(guard) = self.insert_running_procedure(&task) else {
return error::MigrationRunningSnafu {
region_id: task.region_id,
}
.fail();
};
self.verify_task(&task)?;
let region_id = task.region_id;
let table_route = self.retrieve_table_route(region_id).await?;
self.verify_table_route(&table_route, &task)?;
// Safety: checked before.
let region_route = table_route
.region_route(region_id)
.context(error::RegionRouteNotFoundSnafu { region_id })?;
if self.has_migrated(&region_route, &task)? {
info!("Skipping region migration task: {task}");
return Ok(());
}
self.verify_region_leader_peer(&region_route, &task)?;
let procedure =
RegionMigrationProcedure::new(task.clone().into(), self.context_factory.clone());
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
let procedure_id = procedure_with_id.id;
info!("Starting region migration procedure {procedure_id} for {task}");
let procedure_manager = self.procedure_manager.clone();
common_runtime::spawn_bg(async move {
let _ = guard;
let watcher = &mut match procedure_manager.submit(procedure_with_id).await {
Ok(watcher) => watcher,
Err(e) => {
error!(e; "Failed to submit region migration procedure {procedure_id} for {task}");
return;
}
};
if let Err(e) = watcher::wait(watcher).await {
error!(e; "Failed to wait region migration procedure {procedure_id} for {task}");
return;
}
info!("Region migration procedure {procedure_id} for {task} is finished successfully!");
});
Ok(())
}
}
#[cfg(test)]
mod test {
use std::assert_matches::assert_matches;
use common_meta::key::table_route::LogicalTableRouteValue;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::rpc::router::Region;
use super::*;
use crate::procedure::region_migration::test_util::TestingEnv;
#[tokio::test]
async fn test_insert_running_procedure() {
let env = TestingEnv::new();
let context_factory = env.context_factory();
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
cluster_id: 1,
region_id,
from_peer: Peer::empty(2),
to_peer: Peer::empty(1),
};
// Inserts one
manager
.running_procedures
.write()
.unwrap()
.insert(region_id, task.clone());
let err = manager.submit_procedure(task).await.unwrap_err();
assert_matches!(err, error::Error::MigrationRunning { .. });
}
#[tokio::test]
async fn test_submit_procedure_invalid_task() {
let env = TestingEnv::new();
let context_factory = env.context_factory();
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(1),
};
let err = manager.submit_procedure(task).await.unwrap_err();
assert_matches!(err, error::Error::InvalidArguments { .. });
}
#[tokio::test]
async fn test_submit_procedure_table_not_found() {
let env = TestingEnv::new();
let context_factory = env.context_factory();
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
};
let err = manager.submit_procedure(task).await.unwrap_err();
assert_matches!(err, error::Error::TableRouteNotFound { .. });
}
#[tokio::test]
async fn test_submit_procedure_region_route_not_found() {
let env = TestingEnv::new();
let context_factory = env.context_factory();
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
};
let table_info = new_test_table_info(1024, vec![1]).into();
let region_routes = vec![RegionRoute {
region: Region::new_test(RegionId::new(1024, 2)),
leader_peer: Some(Peer::empty(3)),
..Default::default()
}];
env.create_physical_table_metadata(table_info, region_routes)
.await;
let err = manager.submit_procedure(task).await.unwrap_err();
assert_matches!(err, error::Error::RegionRouteNotFound { .. });
}
#[tokio::test]
async fn test_submit_procedure_incorrect_from_peer() {
let env = TestingEnv::new();
let context_factory = env.context_factory();
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
};
let table_info = new_test_table_info(1024, vec![1]).into();
let region_routes = vec![RegionRoute {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(Peer::empty(3)),
..Default::default()
}];
env.create_physical_table_metadata(table_info, region_routes)
.await;
let err = manager.submit_procedure(task).await.unwrap_err();
assert_matches!(err, error::Error::InvalidArguments { .. });
assert!(err
.to_string()
.contains("Invalid region migration `from_peer` argument"));
}
#[tokio::test]
async fn test_submit_procedure_has_migrated() {
common_telemetry::init_default_ut_logging();
let env = TestingEnv::new();
let context_factory = env.context_factory();
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
};
let table_info = new_test_table_info(1024, vec![1]).into();
let region_routes = vec![RegionRoute {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(Peer::empty(2)),
..Default::default()
}];
env.create_physical_table_metadata(table_info, region_routes)
.await;
manager.submit_procedure(task).await.unwrap();
}
#[tokio::test]
async fn test_verify_table_route_error() {
let env = TestingEnv::new();
let context_factory = env.context_factory();
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
};
let err = manager
.verify_table_route(&TableRouteValue::Logical(LogicalTableRouteValue {}), &task)
.unwrap_err();
assert_matches!(err, error::Error::Unexpected { .. });
}
}
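
The manager's duplicate-submission protection is an entry-plus-drop-guard idiom: inserting into `running_procedures` succeeds at most once per region, and the guard removes the entry when the spawned task finishes. A self-contained sketch of just that idiom, with plain integers standing in for region ids and tasks:

use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

type RegionId = u64;

struct Guard {
    region_id: RegionId,
    running: Arc<RwLock<HashMap<RegionId, ()>>>,
}

impl Drop for Guard {
    fn drop(&mut self) {
        // Dropping the guard releases the "one migration per region" slot.
        self.running.write().unwrap().remove(&self.region_id);
    }
}

fn try_acquire(running: &Arc<RwLock<HashMap<RegionId, ()>>>, region_id: RegionId) -> Option<Guard> {
    match running.write().unwrap().entry(region_id) {
        Entry::Occupied(_) => None, // a migration is already running for this region
        Entry::Vacant(v) => {
            v.insert(());
            Some(Guard {
                region_id,
                running: running.clone(),
            })
        }
    }
}

fn main() {
    let running = Arc::new(RwLock::new(HashMap::new()));
    let first = try_acquire(&running, 42);
    assert!(first.is_some());
    assert!(try_acquire(&running, 42).is_none()); // duplicate submission is rejected
    drop(first);
    assert!(try_acquire(&running, 42).is_some()); // slot is free again once the guard drops
}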

View File

@@ -84,7 +84,7 @@ impl RegionMigrationStart {
let table_route = ctx.get_table_route_value().await?;
let region_route = table_route
.region_routes
.region_routes()
.iter()
.find(|route| route.region.id == region_id)
.cloned()
@@ -137,7 +137,6 @@ impl RegionMigrationStart {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::collections::HashMap;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
@@ -187,10 +186,8 @@ mod tests {
..Default::default()
};
env.table_metadata_manager()
.create_table_metadata(table_info, vec![region_route], HashMap::default())
.await
.unwrap();
env.create_physical_table_metadata(table_info, vec![region_route])
.await;
let err = state
.retrieve_region_route(&mut ctx, RegionId::new(1024, 3))
@@ -221,10 +218,8 @@ mod tests {
..Default::default()
}];
env.table_metadata_manager()
.create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
env.create_physical_table_metadata(table_info, region_routes)
.await;
let (next, _) = state.next(&mut ctx).await.unwrap();
@@ -254,10 +249,8 @@ mod tests {
..Default::default()
}];
env.table_metadata_manager()
.create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
env.create_physical_table_metadata(table_info, region_routes)
.await;
let (next, _) = state.next(&mut ctx).await.unwrap();
@@ -281,10 +274,8 @@ mod tests {
..Default::default()
}];
env.table_metadata_manager()
.create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
env.create_physical_table_metadata(table_info, region_routes)
.await;
let (next, _) = state.next(&mut ctx).await.unwrap();

View File

@@ -187,6 +187,7 @@ mod tests {
use std::assert_matches::assert_matches;
use common_catalog::consts::MITO2_ENGINE;
use common_meta::key::table_route::TableRouteValue;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute};
@@ -409,7 +410,11 @@ mod tests {
}];
env.table_metadata_manager()
.create_table_metadata(table_info, region_routes, HashMap::default())
.create_table_metadata(
table_info,
TableRouteValue::physical(region_routes),
HashMap::default(),
)
.await
.unwrap();

View File

@@ -22,14 +22,17 @@ use api::v1::meta::{HeartbeatResponse, MailboxMessage, RequestHeader};
use common_meta::instruction::{
DowngradeRegionReply, InstructionReply, SimpleReply, UpgradeRegionReply,
};
use common_meta::key::table_route::TableRouteValue;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::peer::Peer;
use common_meta::region_keeper::{MemoryRegionKeeper, MemoryRegionKeeperRef};
use common_meta::rpc::router::RegionRoute;
use common_meta::sequence::{Sequence, SequenceBuilder};
use common_meta::state_store::KvStateStore;
use common_meta::DatanodeId;
use common_procedure::{Context as ProcedureContext, ProcedureId, Status};
use common_procedure::local::{LocalManager, ManagerConfig};
use common_procedure::{Context as ProcedureContext, ProcedureId, ProcedureManagerRef, Status};
use common_procedure_test::MockContextProvider;
use common_telemetry::debug;
use common_time::util::current_time_millis;
@@ -89,6 +92,7 @@ pub struct TestingEnv {
mailbox_ctx: MailboxContext,
opening_region_keeper: MemoryRegionKeeperRef,
server_addr: String,
procedure_manager: ProcedureManagerRef,
}
impl TestingEnv {
@@ -103,11 +107,15 @@ impl TestingEnv {
let mailbox_ctx = MailboxContext::new(mailbox_sequence);
let opening_region_keeper = Arc::new(MemoryRegionKeeper::default());
let state_store = Arc::new(KvStateStore::new(kv_backend.clone()));
let procedure_manager = Arc::new(LocalManager::new(ManagerConfig::default(), state_store));
Self {
table_metadata_manager,
opening_region_keeper,
mailbox_ctx,
server_addr: "localhost".to_string(),
procedure_manager,
}
}
@@ -144,6 +152,27 @@ impl TestingEnv {
provider: Arc::new(MockContextProvider::default()),
}
}
/// Returns the [ProcedureManagerRef].
pub fn procedure_manager(&self) -> &ProcedureManagerRef {
&self.procedure_manager
}
/// Creates table metadata with a physical table route.
pub async fn create_physical_table_metadata(
&self,
table_info: RawTableInfo,
region_routes: Vec<RegionRoute>,
) {
self.table_metadata_manager
.create_table_metadata(
table_info,
TableRouteValue::physical(region_routes),
HashMap::default(),
)
.await
.unwrap();
}
}
/// Generates a [InstructionReply::OpenRegion] reply.
@@ -369,7 +398,11 @@ impl ProcedureMigrationTestSuite {
) {
self.env
.table_metadata_manager()
.create_table_metadata(table_info, region_routes, HashMap::default())
.create_table_metadata(
table_info,
TableRouteValue::physical(region_routes),
HashMap::default(),
)
.await
.unwrap();
}
@@ -377,7 +410,7 @@ impl ProcedureMigrationTestSuite {
/// Verifies table metadata after region migration.
pub(crate) async fn verify_table_metadata(&self) {
let region_id = self.context.persistent_ctx.region_id;
let region_routes = self
let table_route = self
.env
.table_metadata_manager
.table_route_manager()
@@ -385,22 +418,25 @@ impl ProcedureMigrationTestSuite {
.await
.unwrap()
.unwrap()
.into_inner()
.region_routes;
.into_inner();
let region_routes = table_route.region_routes();
let expected_leader_id = self.context.persistent_ctx.to_peer.id;
let removed_follower_id = self.context.persistent_ctx.from_peer.id;
let region_route = region_routes
.into_iter()
.iter()
.find(|route| route.region.id == region_id)
.unwrap();
assert!(!region_route.is_leader_downgraded());
assert_eq!(region_route.leader_peer.unwrap().id, expected_leader_id);
assert_eq!(
region_route.leader_peer.as_ref().unwrap().id,
expected_leader_id
);
assert!(!region_route
.follower_peers
.into_iter()
.iter()
.any(|route| route.id == removed_follower_id))
}
}

View File

@@ -43,6 +43,7 @@ impl UpdateMetadata {
let table_id = region_id.table_id();
let current_table_route_value = ctx.get_table_route_value().await?;
// TODO(weny): ensures the leader region peer is the `from_peer`.
if let Err(err) = table_metadata_manager
.update_leader_region_status(table_id, current_table_route_value, |route| {
if route.region.id == region_id
@@ -74,7 +75,6 @@ impl UpdateMetadata {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::collections::HashMap;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
@@ -136,12 +136,10 @@ mod tests {
},
];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
env.create_physical_table_metadata(table_info, region_routes)
.await;
let table_metadata_manager = env.table_metadata_manager();
let original_table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
@@ -190,11 +188,10 @@ mod tests {
..Default::default()
}];
env.create_physical_table_metadata(table_info, region_routes)
.await;
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
@@ -212,7 +209,7 @@ mod tests {
// It should remain unchanged.
assert_eq!(latest_table_route.version(), 0);
assert!(!latest_table_route.region_routes[0].is_leader_downgraded());
assert!(!latest_table_route.region_routes()[0].is_leader_downgraded());
assert!(ctx.volatile_ctx.table_route.is_none());
}
@@ -233,11 +230,10 @@ mod tests {
..Default::default()
}];
env.create_physical_table_metadata(table_info, region_routes)
.await;
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
@@ -253,7 +249,7 @@ mod tests {
.unwrap()
.unwrap();
assert!(latest_table_route.region_routes[0].is_leader_downgraded());
assert!(latest_table_route.region_routes()[0].is_leader_downgraded());
assert!(ctx.volatile_ctx.table_route.is_none());
}
}

View File

@@ -59,7 +59,6 @@ impl UpdateMetadata {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::collections::HashMap;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
@@ -128,12 +127,10 @@ mod tests {
region_routes
};
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
env.create_physical_table_metadata(table_info, region_routes)
.await;
let table_metadata_manager = env.table_metadata_manager();
let old_table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
@@ -166,15 +163,14 @@ mod tests {
state.rollback_downgraded_region(&mut ctx).await.unwrap();
let region_routes = table_metadata_manager
let table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
.await
.unwrap()
.unwrap()
.into_inner()
.region_routes;
assert_eq!(expected_region_routes, region_routes);
.into_inner();
assert_eq!(&expected_region_routes, table_route.region_routes());
}
#[tokio::test]
@@ -214,11 +210,10 @@ mod tests {
region_routes
};
env.create_physical_table_metadata(table_info, region_routes)
.await;
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
@@ -229,14 +224,13 @@ mod tests {
assert!(ctx.volatile_ctx.table_route.is_none());
let region_routes = table_metadata_manager
let table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
.await
.unwrap()
.unwrap()
.into_inner()
.region_routes;
assert_eq!(expected_region_routes, region_routes);
.into_inner();
assert_eq!(&expected_region_routes, table_route.region_routes());
}
}

View File

@@ -33,7 +33,7 @@ impl UpdateMetadata {
let region_id = ctx.region_id();
let table_route_value = ctx.get_table_route_value().await?.clone();
let mut region_routes = table_route_value.region_routes.clone();
let mut region_routes = table_route_value.region_routes().clone();
let region_route = region_routes
.iter_mut()
.find(|route| route.region.id == region_id)
@@ -81,7 +81,7 @@ impl UpdateMetadata {
let region_id = ctx.region_id();
let table_route_value = ctx.get_table_route_value().await?.clone();
let region_routes = table_route_value.region_routes.clone();
let region_routes = table_route_value.region_routes().clone();
let region_route = region_routes
.into_iter()
.find(|route| route.region.id == region_id)
@@ -176,7 +176,6 @@ impl UpdateMetadata {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::collections::HashMap;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
@@ -225,11 +224,8 @@ mod tests {
..Default::default()
}];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
env.create_physical_table_metadata(table_info, region_routes)
.await;
let err = state
.build_upgrade_candidate_region_metadata(&mut ctx)
@@ -254,11 +250,8 @@ mod tests {
..Default::default()
}];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
env.create_physical_table_metadata(table_info, region_routes)
.await;
let err = state
.build_upgrade_candidate_region_metadata(&mut ctx)
@@ -285,11 +278,8 @@ mod tests {
leader_status: Some(RegionStatus::Downgraded),
}];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
env.create_physical_table_metadata(table_info, region_routes)
.await;
let new_region_routes = state
.build_upgrade_candidate_region_metadata(&mut ctx)
@@ -326,12 +316,10 @@ mod tests {
},
];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
env.create_physical_table_metadata(table_info, region_routes)
.await;
let table_metadata_manager = env.table_metadata_manager();
let original_table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
@@ -385,11 +373,8 @@ mod tests {
leader_status: None,
}];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
env.create_physical_table_metadata(table_info, region_routes)
.await;
let updated = state.check_metadata_updated(&mut ctx).await.unwrap();
assert!(!updated);
@@ -411,11 +396,8 @@ mod tests {
leader_status: None,
}];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
env.create_physical_table_metadata(table_info, region_routes)
.await;
let updated = state.check_metadata_updated(&mut ctx).await.unwrap();
assert!(updated);
@@ -437,11 +419,8 @@ mod tests {
leader_status: Some(RegionStatus::Downgraded),
}];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
env.create_physical_table_metadata(table_info, region_routes)
.await;
let err = state.check_metadata_updated(&mut ctx).await.unwrap_err();
assert_matches!(err, Error::Unexpected { .. });
@@ -470,24 +449,23 @@ mod tests {
.unwrap();
ctx.volatile_ctx.opening_region_guard = Some(guard);
env.create_physical_table_metadata(table_info, region_routes)
.await;
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
let _ = next.as_any().downcast_ref::<RegionMigrationEnd>().unwrap();
let region_routes = table_metadata_manager
let table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
.await
.unwrap()
.unwrap()
.into_inner()
.region_routes;
.into_inner();
let region_routes = table_route.region_routes();
assert!(ctx.volatile_ctx.table_route.is_none());
assert!(ctx.volatile_ctx.opening_region_guard.is_none());

View File

@@ -89,7 +89,7 @@ impl UpgradeCandidateRegion {
Instruction::UpgradeRegion(UpgradeRegion {
region_id,
last_entry_id,
wait_for_replay_secs: Some(self.replay_timeout.as_secs()),
wait_for_replay_timeout: Some(self.replay_timeout),
})
}

View File

@@ -100,12 +100,12 @@ fn test_region_request_builder() {
let procedure = CreateTableProcedure::new(
1,
create_table_task(),
test_data::new_region_routes(),
TableRouteValue::physical(test_data::new_region_routes()),
HashMap::default(),
test_data::new_ddl_context(Arc::new(DatanodeClients::default())),
);
let template = procedure.new_region_request_builder().unwrap();
let template = procedure.new_region_request_builder(None).unwrap();
let expected = PbCreateRegionRequest {
region_id: 0,
@@ -191,7 +191,7 @@ async fn test_on_datanode_create_regions() {
let mut procedure = CreateTableProcedure::new(
1,
create_table_task(),
region_routes,
TableRouteValue::physical(region_routes),
HashMap::default(),
test_data::new_ddl_context(datanode_manager),
);
@@ -247,7 +247,7 @@ async fn test_on_datanode_drop_regions() {
let procedure = DropTableProcedure::new(
1,
drop_table_task,
DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes)),
DeserializedValueWithBytes::from_inner(TableRouteValue::physical(region_routes)),
DeserializedValueWithBytes::from_inner(TableInfoValue::new(test_data::new_table_info())),
test_data::new_ddl_context(datanode_manager),
);
@@ -373,7 +373,7 @@ async fn test_submit_alter_region_requests() {
.table_metadata_manager
.create_table_metadata(
table_info.clone(),
region_routes.clone(),
TableRouteValue::physical(region_routes),
HashMap::default(),
)
.await

View File

@@ -188,6 +188,7 @@ mod tests {
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use common_meta::key::table_route::TableRouteValue;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
@@ -291,7 +292,11 @@ mod tests {
let keeper = new_test_keeper();
let table_metadata_manager = keeper.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, vec![region_route.clone()], HashMap::default())
.create_table_metadata(
table_info,
TableRouteValue::physical(vec![region_route]),
HashMap::default(),
)
.await
.unwrap();
@@ -378,7 +383,11 @@ mod tests {
let keeper = new_test_keeper();
let table_metadata_manager = keeper.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, vec![region_route.clone()], HashMap::default())
.create_table_metadata(
table_info,
TableRouteValue::physical(vec![region_route]),
HashMap::default(),
)
.await
.unwrap();

View File

@@ -143,7 +143,7 @@ async fn get_leader_peer_ids(
.context(error::TableMetadataManagerSnafu)
.map(|route| {
route.map_or_else(Vec::new, |route| {
find_leaders(&route.region_routes)
find_leaders(route.region_routes())
.into_iter()
.map(|peer| peer.id)
.collect()

View File

@@ -16,7 +16,10 @@ mod health;
mod heartbeat;
mod leader;
mod meta;
// TODO(weny): remove it.
mod node_lease;
#[allow(dead_code)]
mod region_migration;
mod route;
mod util;

View File

@@ -0,0 +1,207 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::num::ParseIntError;
use std::str::FromStr;
use common_meta::peer::Peer;
use common_meta::ClusterId;
use serde::Serialize;
use snafu::ResultExt;
use store_api::storage::RegionId;
use tonic::codegen::http;
use super::HttpHandler;
use crate::error::{self, Error, Result};
pub trait PeerLookup: Send + Sync {
fn peer(&self, peer_id: u64) -> Option<Peer>;
}
/// The handler for submitting a region migration task.
pub struct SubmitRegionMigrationTaskHandler {
// TODO(weny): waits for https://github.com/GreptimeTeam/greptimedb/pull/3014
}
#[derive(Debug, Clone, PartialEq, Eq)]
struct SubmitRegionMigrationTaskRequest {
cluster_id: ClusterId,
region_id: RegionId,
from_peer_id: u64,
to_peer_id: u64,
}
#[derive(Debug, Serialize)]
struct SubmitRegionMigrationTaskResponse {
procedure_id: String,
}
fn parse_num_parameter_with_default<T, F>(
key: &str,
params: &HashMap<String, String>,
default_fn: F,
) -> Result<T>
where
F: Fn(&str) -> Result<T>,
T: FromStr<Err = ParseIntError>,
{
let parse_result = if let Some(id) = params.get(key) {
id.parse::<T>().context(error::ParseNumSnafu {
err_msg: format!("invalid {key}: {id}"),
})?
} else {
default_fn(key)?
};
Ok(parse_result)
}
impl TryFrom<&HashMap<String, String>> for SubmitRegionMigrationTaskRequest {
type Error = Error;
fn try_from(params: &HashMap<String, String>) -> Result<Self> {
let cluster_id = parse_num_parameter_with_default("cluster_id", params, |_| Ok(0))?;
let region_id: u64 = parse_num_parameter_with_default("region_id", params, |key| {
error::MissingRequiredParameterSnafu { param: key }.fail()
})?;
let from_peer_id: u64 = parse_num_parameter_with_default("from_peer_id", params, |key| {
error::MissingRequiredParameterSnafu { param: key }.fail()
})?;
let to_peer_id: u64 = parse_num_parameter_with_default("to_peer_id", params, |key| {
error::MissingRequiredParameterSnafu { param: key }.fail()
})?;
Ok(SubmitRegionMigrationTaskRequest {
cluster_id,
region_id: RegionId::from_u64(region_id),
from_peer_id,
to_peer_id,
})
}
}
impl SubmitRegionMigrationTaskHandler {
/// Submits a region migration task and returns the procedure id.
async fn handle_submit(
&self,
_task: SubmitRegionMigrationTaskRequest,
) -> Result<SubmitRegionMigrationTaskResponse> {
// TODO(weny): waits for https://github.com/GreptimeTeam/greptimedb/pull/3014
todo!()
}
}
#[async_trait::async_trait]
impl HttpHandler for SubmitRegionMigrationTaskHandler {
async fn handle(
&self,
_: &str,
params: &HashMap<String, String>,
) -> Result<http::Response<String>> {
let request = SubmitRegionMigrationTaskRequest::try_from(params)?;
let response = self.handle_submit(request).await?;
http::Response::builder()
.status(http::StatusCode::OK)
.body(serde_json::to_string(&response).with_context(|_| {
error::SerializeToJsonSnafu {
input: format!("{response:?}"),
}
})?)
.context(error::InvalidHttpBodySnafu)
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::collections::HashMap;
use crate::error;
#[test]
fn test_parse_migration_task_req() {
use store_api::storage::RegionId;
use crate::service::admin::region_migration::SubmitRegionMigrationTaskRequest;
let params = HashMap::from([
("cluster_id".to_string(), "10".to_string()),
(
"region_id".to_string(),
RegionId::new(1024, 1).as_u64().to_string(),
),
("from_peer_id".to_string(), "1".to_string()),
("to_peer_id".to_string(), "2".to_string()),
]);
let task_req = SubmitRegionMigrationTaskRequest::try_from(&params).unwrap();
assert_eq!(
SubmitRegionMigrationTaskRequest {
cluster_id: 10,
region_id: RegionId::new(1024, 1),
from_peer_id: 1,
to_peer_id: 2,
},
task_req
);
let params = HashMap::from([
(
"region_id".to_string(),
RegionId::new(1024, 1).as_u64().to_string(),
),
("from_peer_id".to_string(), "1".to_string()),
("to_peer_id".to_string(), "2".to_string()),
]);
let task_req = SubmitRegionMigrationTaskRequest::try_from(&params).unwrap();
assert_eq!(
SubmitRegionMigrationTaskRequest {
cluster_id: 0,
region_id: RegionId::new(1024, 1),
from_peer_id: 1,
to_peer_id: 2,
},
task_req
);
let required_fields = [
(
"region_id".to_string(),
RegionId::new(1024, 1).as_u64().to_string(),
),
("from_peer_id".to_string(), "1".to_string()),
("to_peer_id".to_string(), "2".to_string()),
];
for i in 0..required_fields.len() {
let params = required_fields[..i]
.iter()
.cloned()
.collect::<HashMap<_, _>>();
let err = SubmitRegionMigrationTaskRequest::try_from(&params).unwrap_err();
assert_matches!(err, error::Error::MissingRequiredParameter { .. });
assert!(err.to_string().contains(&required_fields[i].0));
}
}
}
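
`parse_num_parameter_with_default` is a small generic helper: parse the parameter when it is present, otherwise delegate to a per-key default closure that either supplies a value or fails with a missing-parameter error. A stand-alone approximation of the same shape, with plain String errors instead of the metasrv error enum:

use std::collections::HashMap;
use std::num::ParseIntError;
use std::str::FromStr;

fn parse_num_with_default<T, F>(
    key: &str,
    params: &HashMap<String, String>,
    default_fn: F,
) -> Result<T, String>
where
    F: Fn(&str) -> Result<T, String>,
    T: FromStr<Err = ParseIntError>,
{
    match params.get(key) {
        Some(raw) => raw
            .parse::<T>()
            .map_err(|e| format!("invalid {key}: {raw} ({e})")),
        None => default_fn(key),
    }
}

fn main() {
    // A region id in its u64 form, as it would arrive in the query string.
    let params: HashMap<String, String> =
        [("region_id".to_string(), "4398046511105".to_string())].into();

    // Optional parameter: absent, so it falls back to the default.
    let cluster_id: u64 = parse_num_with_default("cluster_id", &params, |_| Ok(0)).unwrap();
    assert_eq!(cluster_id, 0);

    // Required parameter: absent, so the default closure reports it as missing.
    let missing: Result<u64, String> =
        parse_num_with_default("from_peer_id", &params, |k| Err(format!("missing {k}")));
    assert!(missing.is_err());
}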

View File

@@ -12,17 +12,23 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use common_catalog::format_full_table_name;
use std::collections::HashMap;
use common_catalog::consts::METRIC_ENGINE;
use common_error::ext::BoxedError;
use common_meta::ddl::{TableMetadata, TableMetadataAllocator, TableMetadataAllocatorContext};
use common_meta::error::{self as meta_error, Result as MetaResult};
use common_meta::error::{ExternalSnafu, Result as MetaResult};
use common_meta::key::table_route::{
LogicalTableRouteValue, PhysicalTableRouteValue, TableRouteValue,
};
use common_meta::rpc::ddl::CreateTableTask;
use common_meta::rpc::router::{Region, RegionRoute};
use common_meta::sequence::SequenceRef;
use common_meta::wal::{allocate_region_wal_options, WalOptionsAllocatorRef};
use common_telemetry::{debug, warn};
use common_meta::ClusterId;
use common_telemetry::debug;
use snafu::{ensure, ResultExt};
use store_api::storage::{RegionId, TableId, MAX_REGION_SEQ};
use store_api::storage::{RegionId, RegionNumber, TableId, MAX_REGION_SEQ};
use crate::error::{self, Result, TooManyPartitionsSnafu};
use crate::metasrv::{SelectorContext, SelectorRef};
@@ -49,6 +55,83 @@ impl MetaSrvTableMetadataAllocator {
wal_options_allocator,
}
}
async fn create_table_route(
&self,
cluster_id: ClusterId,
table_id: TableId,
task: &CreateTableTask,
) -> Result<TableRouteValue> {
let table_route = if task.create_table.engine == METRIC_ENGINE {
TableRouteValue::Logical(LogicalTableRouteValue {})
} else {
let regions = task.partitions.len();
ensure!(regions <= MAX_REGION_SEQ as usize, TooManyPartitionsSnafu);
let mut peers = self
.selector
.select(
cluster_id,
&self.ctx,
SelectorOptions {
min_required_items: regions,
allow_duplication: true,
},
)
.await?;
ensure!(
peers.len() >= regions,
error::NoEnoughAvailableDatanodeSnafu {
required: regions,
available: peers.len(),
}
);
peers.truncate(regions);
let region_routes = task
.partitions
.iter()
.enumerate()
.map(|(i, partition)| {
let region = Region {
id: RegionId::new(table_id, i as RegionNumber),
partition: Some(partition.clone().into()),
..Default::default()
};
let peer = peers[i % peers.len()].clone();
RegionRoute {
region,
leader_peer: Some(peer.into()),
..Default::default()
}
})
.collect::<Vec<_>>();
TableRouteValue::Physical(PhysicalTableRouteValue::new(region_routes))
};
Ok(table_route)
}
fn create_wal_options(
&self,
table_route: &TableRouteValue,
) -> MetaResult<HashMap<RegionNumber, String>> {
match table_route {
TableRouteValue::Physical(x) => {
let region_numbers = x
.region_routes
.iter()
.map(|route| route.region.id.region_number())
.collect();
allocate_region_wal_options(region_numbers, &self.wal_options_allocator)
}
TableRouteValue::Logical(_) => Ok(HashMap::new()),
}
}
}
#[async_trait::async_trait]
@@ -58,23 +141,15 @@ impl TableMetadataAllocator for MetaSrvTableMetadataAllocator {
ctx: &TableMetadataAllocatorContext,
task: &CreateTableTask,
) -> MetaResult<TableMetadata> {
let (table_id, region_routes) = handle_create_region_routes(
ctx.cluster_id,
task,
&self.ctx,
&self.selector,
&self.table_id_sequence,
)
.await
.map_err(BoxedError::new)
.context(meta_error::ExternalSnafu)?;
let table_id = self.table_id_sequence.next().await? as TableId;
let region_numbers = region_routes
.iter()
.map(|route| route.region.id.region_number())
.collect();
let region_wal_options =
allocate_region_wal_options(region_numbers, &self.wal_options_allocator)?;
let table_route = self
.create_table_route(ctx.cluster_id, table_id, task)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
let region_wal_options = self.create_wal_options(&table_route)?;
debug!(
"Allocated region wal options {:?} for table {}",
@@ -83,84 +158,8 @@ impl TableMetadataAllocator for MetaSrvTableMetadataAllocator {
Ok(TableMetadata {
table_id,
region_routes,
table_route,
region_wal_options,
})
}
}
/// pre-allocates create table's table id and region routes.
async fn handle_create_region_routes(
cluster_id: u64,
task: &CreateTableTask,
ctx: &SelectorContext,
selector: &SelectorRef,
table_id_sequence: &SequenceRef,
) -> Result<(TableId, Vec<RegionRoute>)> {
let table_info = &task.table_info;
let partitions = &task.partitions;
let mut peers = selector
.select(
cluster_id,
ctx,
SelectorOptions {
min_required_items: partitions.len(),
allow_duplication: true,
},
)
.await?;
if peers.len() < partitions.len() {
warn!(
"Create table failed due to no enough available datanodes, table: {}, partition number: {}, datanode number: {}",
format_full_table_name(
&table_info.catalog_name,
&table_info.schema_name,
&table_info.name
),
partitions.len(),
peers.len()
);
return error::NoEnoughAvailableDatanodeSnafu {
required: partitions.len(),
available: peers.len(),
}
.fail();
}
// We don't need to keep all peers, just truncate it to the number of partitions.
// If the peers are not enough, some peers will be used for multiple partitions.
peers.truncate(partitions.len());
let table_id = table_id_sequence
.next()
.await
.context(error::NextSequenceSnafu)? as u32;
ensure!(
partitions.len() <= MAX_REGION_SEQ as usize,
TooManyPartitionsSnafu
);
let region_routes = partitions
.iter()
.enumerate()
.map(|(i, partition)| {
let region = Region {
id: RegionId::new(table_id, i as u32),
partition: Some(partition.clone().into()),
..Default::default()
};
let peer = peers[i % peers.len()].clone();
RegionRoute {
region,
leader_peer: Some(peer.into()),
follower_peers: vec![], // follower_peers is not supported at the moment
leader_status: None,
}
})
.collect::<Vec<_>>();
Ok((table_id, region_routes))
}
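
The new `create_table_route` keeps the old allocation strategy for physical tables — select at least as many datanode peers as partitions, fail with `NoEnoughAvailableDatanode` otherwise, truncate the surplus, and assign one leader peer per region — while logical (metric engine) tables skip peer selection entirely. A simplified, runnable sketch of the leader-assignment step, with integers standing in for peers and region routes:

fn assign_leaders(regions: usize, mut peers: Vec<u32>) -> Result<Vec<(usize, u32)>, String> {
    if peers.len() < regions {
        return Err(format!("need {regions} datanodes, only {} available", peers.len()));
    }
    // One peer per region is enough; drop the extras.
    peers.truncate(regions);
    Ok((0..regions).map(|i| (i, peers[i % peers.len()])).collect())
}

fn main() {
    // 3 partitions, 4 candidate datanodes: each region gets its own leader.
    assert_eq!(
        assign_leaders(3, vec![10, 20, 30, 40]).unwrap(),
        vec![(0, 10), (1, 20), (2, 30)]
    );
    // Not enough datanodes: the whole create-table request is rejected.
    assert!(assign_leaders(3, vec![10]).is_err());
}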

View File

@@ -17,6 +17,7 @@ use std::sync::Arc;
use chrono::DateTime;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
use common_meta::key::table_route::TableRouteValue;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::peer::Peer;
@@ -145,7 +146,11 @@ pub(crate) async fn prepare_table_region_and_info_value(
region_route_factory(4, 3),
];
table_metadata_manager
.create_table_metadata(table_info, region_routes, HashMap::default())
.create_table_metadata(
table_info,
TableRouteValue::physical(region_routes),
HashMap::default(),
)
.await
.unwrap();
}

View File

@@ -17,6 +17,7 @@ use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use catalog::kvbackend::MetaKvBackend;
use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::KvBackendRef;
@@ -114,7 +115,7 @@ pub(crate) async fn create_partition_rule_manager(
table_metadata_manager
.create_table_metadata(
new_test_table_info(1, "table_1", regions.clone().into_iter()).into(),
vec![
TableRouteValue::physical(vec![
RegionRoute {
region: Region {
id: 3.into(),
@@ -169,7 +170,7 @@ pub(crate) async fn create_partition_rule_manager(
follower_peers: vec![],
leader_status: None,
},
],
]),
region_wal_options.clone(),
)
.await
@@ -178,7 +179,7 @@ pub(crate) async fn create_partition_rule_manager(
table_metadata_manager
.create_table_metadata(
new_test_table_info(2, "table_2", regions.clone().into_iter()).into(),
vec![
TableRouteValue::physical(vec![
RegionRoute {
region: Region {
id: 1.into(),
@@ -239,7 +240,7 @@ pub(crate) async fn create_partition_rule_manager(
follower_peers: vec![],
leader_status: None,
},
],
]),
region_wal_options,
)
.await

View File

@@ -19,7 +19,7 @@ use api::v1::Rows;
use common_meta::key::table_route::TableRouteManager;
use common_meta::kv_backend::KvBackendRef;
use common_meta::peer::Peer;
use common_meta::rpc::router::{convert_to_region_leader_map, RegionRoutes};
use common_meta::rpc::router::RegionRoutes;
use common_query::prelude::Expr;
use datafusion_expr::{BinaryExpr, Expr as DfExpr, Operator};
use datatypes::prelude::Value;
@@ -76,56 +76,7 @@ impl PartitionRuleManager {
.context(error::FindTableRoutesSnafu { table_id })?
.into_inner();
Ok(RegionRoutes(route.region_routes))
}
/// Find datanodes of corresponding regions of given table.
pub async fn find_region_datanodes(
&self,
table_id: TableId,
regions: Vec<RegionNumber>,
) -> Result<HashMap<Peer, Vec<RegionNumber>>> {
let route = self
.table_route_manager
.get(table_id)
.await
.context(error::TableRouteManagerSnafu)?
.context(error::FindTableRoutesSnafu { table_id })?
.into_inner();
let mut datanodes = HashMap::with_capacity(regions.len());
let region_map = convert_to_region_leader_map(&route.region_routes);
for region in regions.iter() {
let datanode = *region_map.get(region).context(error::FindDatanodeSnafu {
table_id,
region: *region,
})?;
datanodes
.entry(datanode.clone())
.or_insert_with(Vec::new)
.push(*region);
}
Ok(datanodes)
}
/// Find all leader peers of given table.
pub async fn find_table_region_leaders(&self, table_id: TableId) -> Result<Vec<Peer>> {
let route = self
.table_route_manager
.get(table_id)
.await
.context(error::TableRouteManagerSnafu)?
.context(error::FindTableRoutesSnafu { table_id })?
.into_inner();
let mut peers = Vec::with_capacity(route.region_routes.len());
for peer in &route.region_routes {
peers.push(peer.leader_peer.clone().with_context(|| FindLeaderSnafu {
region_id: peer.region.id,
table_id,
})?);
}
Ok(peers)
Ok(RegionRoutes(route.region_routes().clone()))
}
pub async fn find_table_partitions(&self, table_id: TableId) -> Result<Vec<PartitionInfo>> {
@@ -136,13 +87,15 @@ impl PartitionRuleManager {
.context(error::TableRouteManagerSnafu)?
.context(error::FindTableRoutesSnafu { table_id })?
.into_inner();
let region_routes = route.region_routes();
ensure!(
!route.region_routes.is_empty(),
!region_routes.is_empty(),
error::FindTableRoutesSnafu { table_id }
);
let mut partitions = Vec::with_capacity(route.region_routes.len());
for r in route.region_routes.iter() {
let mut partitions = Vec::with_capacity(region_routes.len());
for r in region_routes {
let partition = r
.region
.partition

View File

@@ -58,7 +58,7 @@ openmetrics-parser = "0.4"
opensrv-mysql = { git = "https://github.com/MichaelScofield/opensrv.git", rev = "1676c1d" }
opentelemetry-proto.workspace = true
parking_lot = "0.12"
pgwire = "0.17"
pgwire = "0.18"
pin-project = "1.0"
postgres-types = { version = "0.2", features = ["with-chrono-0_4"] }
pprof = { version = "0.13", features = [

View File

@@ -21,8 +21,8 @@ use std::sync::Arc;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_time::timezone::system_time_zone_name;
use common_time::TimeZone;
use common_time::timezone::system_timezone_name;
use common_time::Timezone;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::vectors::StringVector;
@@ -55,7 +55,7 @@ static SELECT_TIME_DIFF_FUNC_PATTERN: Lazy<Regex> =
static SHOW_SQL_MODE_PATTERN: Lazy<Regex> =
Lazy::new(|| Regex::new("(?i)^(SHOW VARIABLES LIKE 'sql_mode'(.*))").unwrap());
// Time zone settings
// Timezone settings
static SET_TIME_ZONE_PATTERN: Lazy<Regex> =
Lazy::new(|| Regex::new(r"(?i)^SET TIME_ZONE\s*=\s*'(\S+)'").unwrap());
@@ -200,11 +200,8 @@ fn select_variable(query: &str, query_context: QueryContextRef) -> Option<Output
// get value of variables from known sources or fallback to defaults
let value = match var_as[0] {
"time_zone" => query_context
.time_zone()
.map(|tz| tz.to_string())
.unwrap_or_else(|| "".to_owned()),
"system_time_zone" => system_time_zone_name(),
"time_zone" => query_context.timezone().to_string(),
"system_time_zone" => system_timezone_name(),
_ => VAR_VALUES
.get(var_as[0])
.map(|v| v.to_string())
@@ -271,8 +268,8 @@ fn check_set_variables(query: &str, session: SessionRef) -> Option<Output> {
if let Some(captures) = SET_TIME_ZONE_PATTERN.captures(query) {
// get the capture
let tz = captures.get(1).unwrap();
if let Ok(timezone) = TimeZone::from_tz_string(tz.as_str()) {
session.set_time_zone(timezone);
if let Ok(timezone) = Timezone::from_tz_string(tz.as_str()) {
session.set_timezone(timezone);
return Some(Output::AffectedRows(0));
}
}
@@ -331,6 +328,7 @@ fn get_version() -> String {
#[cfg(test)]
mod test {
use common_time::timezone::set_default_timezone;
use session::context::{Channel, QueryContext};
use session::Session;
@@ -390,16 +388,16 @@ mod test {
+-----------------+------------------------+";
test(query, expected);
// set sysstem timezone
std::env::set_var("TZ", "Asia/Shanghai");
// set system timezone
set_default_timezone(Some("Asia/Shanghai")).unwrap();
// complex variables
let query = "/* mysql-connector-java-8.0.17 (Revision: 16a712ddb3f826a1933ab42b0039f7fb9eebc6ec) */SELECT @@session.auto_increment_increment AS auto_increment_increment, @@character_set_client AS character_set_client, @@character_set_connection AS character_set_connection, @@character_set_results AS character_set_results, @@character_set_server AS character_set_server, @@collation_server AS collation_server, @@collation_connection AS collation_connection, @@init_connect AS init_connect, @@interactive_timeout AS interactive_timeout, @@license AS license, @@lower_case_table_names AS lower_case_table_names, @@max_allowed_packet AS max_allowed_packet, @@net_write_timeout AS net_write_timeout, @@performance_schema AS performance_schema, @@sql_mode AS sql_mode, @@system_time_zone AS system_time_zone, @@time_zone AS time_zone, @@transaction_isolation AS transaction_isolation, @@wait_timeout AS wait_timeout;";
let expected = "\
+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+
| auto_increment_increment | character_set_client | character_set_connection | character_set_results | character_set_server | collation_server | collation_connection | init_connect | interactive_timeout | license | lower_case_table_names | max_allowed_packet | net_write_timeout | performance_schema | sql_mode | system_time_zone | time_zone | transaction_isolation | wait_timeout; |
+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31536000 | 0 | 0 | 134217728 | 31536000 | 0 | 0 | +08:00 | | REPEATABLE-READ | 31536000 |
+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+";
+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+---------------+-----------------------+---------------+
| auto_increment_increment | character_set_client | character_set_connection | character_set_results | character_set_server | collation_server | collation_connection | init_connect | interactive_timeout | license | lower_case_table_names | max_allowed_packet | net_write_timeout | performance_schema | sql_mode | system_time_zone | time_zone | transaction_isolation | wait_timeout; |
+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+---------------+-----------------------+---------------+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31536000 | 0 | 0 | 134217728 | 31536000 | 0 | 0 | Asia/Shanghai | Asia/Shanghai | REPEATABLE-READ | 31536000 |
+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+---------------+-----------------------+---------------+";
test(query, expected);
let query = "show variables";
@@ -437,8 +435,17 @@ mod test {
}
#[test]
fn test_set_time_zone() {
fn test_set_timezone() {
// test default is UTC when no config in greptimedb
{
let session = Arc::new(Session::new(None, Channel::Mysql));
let query_context = session.new_query_context();
assert_eq!("UTC", query_context.timezone().to_string());
}
set_default_timezone(Some("Asia/Shanghai")).unwrap();
let session = Arc::new(Session::new(None, Channel::Mysql));
let query_context = session.new_query_context();
assert_eq!("Asia/Shanghai", query_context.timezone().to_string());
let output = check(
"set time_zone = 'UTC'",
QueryContext::arc(),
@@ -451,7 +458,7 @@ mod test {
_ => unreachable!(),
}
let query_context = session.new_query_context();
assert_eq!("UTC", query_context.time_zone().unwrap().to_string());
assert_eq!("UTC", query_context.timezone().to_string());
let output = check("select @@time_zone", query_context.clone(), session.clone());
match output.unwrap() {


@@ -193,10 +193,12 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
Value::Binary(v) => row_writer.write_col(v.deref())?,
Value::Date(v) => row_writer.write_col(v.to_chrono_date())?,
// convert datetime and timestamp to timezone of current connection
Value::DateTime(v) => row_writer
.write_col(v.to_chrono_datetime_with_timezone(query_context.time_zone()))?,
Value::Timestamp(v) => row_writer
.write_col(v.to_chrono_datetime_with_timezone(query_context.time_zone()))?,
Value::DateTime(v) => row_writer.write_col(
v.to_chrono_datetime_with_timezone(Some(query_context.timezone())),
)?,
Value::Timestamp(v) => row_writer.write_col(
v.to_chrono_datetime_with_timezone(Some(query_context.timezone())),
)?,
Value::Interval(v) => row_writer.write_col(v.to_iso8601_string())?,
Value::Duration(v) => row_writer.write_col(v.to_std_duration())?,
Value::List(_) => {
@@ -208,7 +210,7 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
})
}
Value::Time(v) => row_writer
.write_col(v.to_timezone_aware_string(query_context.time_zone()))?,
.write_col(v.to_timezone_aware_string(Some(query_context.timezone())))?,
Value::Decimal128(v) => row_writer.write_col(v.to_string())?,
}
}
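The writer change above always converts DateTime and Timestamp values into the connection's timezone before encoding the row. A minimal standalone sketch of that conversion step, using chrono and chrono-tz directly; the helper name to_session_datetime is illustrative and not part of the GreptimeDB API:

use chrono::{NaiveDateTime, TimeZone as _};
use chrono_tz::Tz;

// Render a UTC timestamp (seconds since the epoch) in the session's timezone,
// which is what the row writer does before handing the value to the encoder.
fn to_session_datetime(epoch_secs: i64, session_tz: Tz) -> chrono::DateTime<Tz> {
    let naive_utc = NaiveDateTime::from_timestamp_opt(epoch_secs, 0)
        .expect("timestamp out of range");
    session_tz.from_utc_datetime(&naive_utc)
}

fn main() {
    let tz: Tz = "Asia/Shanghai".parse().unwrap();
    // 2023-12-27T00:00:00Z corresponds to 08:00 local time in Asia/Shanghai.
    println!("{}", to_session_datetime(1_703_635_200, tz));
}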


@@ -31,7 +31,6 @@ use std::sync::Arc;
use ::auth::UserProviderRef;
use derive_builder::Builder;
use pgwire::api::auth::ServerParameterProvider;
use pgwire::api::store::MemPortalStore;
use pgwire::api::ClientInfo;
pub use server::PostgresServer;
use session::context::Channel;
@@ -40,7 +39,6 @@ use session::Session;
use self::auth_handler::PgLoginVerifier;
use self::handler::DefaultQueryParser;
use crate::query_handler::sql::ServerSqlQueryHandlerRef;
use crate::SqlPlan;
pub(crate) struct GreptimeDBStartupParameters {
version: &'static str,
@@ -76,7 +74,6 @@ pub struct PostgresServerHandler {
param_provider: Arc<GreptimeDBStartupParameters>,
session: Arc<Session>,
portal_store: Arc<MemPortalStore<SqlPlan>>,
query_parser: Arc<DefaultQueryParser>,
}
@@ -99,7 +96,6 @@ impl MakePostgresServerHandler {
param_provider: self.param_provider.clone(),
session: session.clone(),
portal_store: Arc::new(MemPortalStore::new()),
query_parser: Arc::new(DefaultQueryParser::new(self.query_handler.clone(), session)),
}
}


@@ -25,7 +25,6 @@ use pgwire::api::portal::{Format, Portal};
use pgwire::api::query::{ExtendedQueryHandler, SimpleQueryHandler, StatementOrPortal};
use pgwire::api::results::{DataRowEncoder, DescribeResponse, QueryResponse, Response, Tag};
use pgwire::api::stmt::QueryParser;
use pgwire::api::store::MemPortalStore;
use pgwire::api::{ClientInfo, Type};
use pgwire::error::{ErrorInfo, PgWireError, PgWireResult};
use query::query_engine::DescribeResult;
@@ -192,11 +191,6 @@ impl QueryParser for DefaultQueryParser {
impl ExtendedQueryHandler for PostgresServerHandler {
type Statement = SqlPlan;
type QueryParser = DefaultQueryParser;
type PortalStore = MemPortalStore<Self::Statement>;
fn portal_store(&self) -> Arc<Self::PortalStore> {
self.portal_store.clone()
}
fn query_parser(&self) -> Arc<Self::QueryParser> {
self.query_parser.clone()


@@ -21,7 +21,8 @@ use arc_swap::ArcSwap;
use auth::UserInfoRef;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_catalog::{build_db_string, parse_catalog_and_schema_from_db_string};
use common_time::TimeZone;
use common_time::timezone::get_timezone;
use common_time::Timezone;
use derive_builder::Builder;
use sql::dialect::{Dialect, GreptimeDbDialect, MySqlDialect, PostgreSqlDialect};
@@ -35,7 +36,7 @@ pub struct QueryContext {
current_catalog: String,
current_schema: String,
current_user: ArcSwap<Option<UserInfoRef>>,
time_zone: Option<TimeZone>,
timezone: Timezone,
sql_dialect: Box<dyn Dialect + Send + Sync>,
}
@@ -57,7 +58,7 @@ impl From<&RegionRequestHeader> for QueryContext {
current_catalog: catalog.to_string(),
current_schema: schema.to_string(),
current_user: Default::default(),
time_zone: Default::default(),
timezone: get_timezone(None),
sql_dialect: Box::new(GreptimeDbDialect {}),
}
}
@@ -115,8 +116,8 @@ impl QueryContext {
}
#[inline]
pub fn time_zone(&self) -> Option<TimeZone> {
self.time_zone.clone()
pub fn timezone(&self) -> Timezone {
self.timezone.clone()
}
#[inline]
@@ -142,7 +143,7 @@ impl QueryContextBuilder {
current_user: self
.current_user
.unwrap_or_else(|| ArcSwap::new(Arc::new(None))),
time_zone: self.time_zone.unwrap_or(None),
timezone: self.timezone.unwrap_or(get_timezone(None)),
sql_dialect: self
.sql_dialect
.unwrap_or_else(|| Box::new(GreptimeDbDialect {})),


@@ -21,7 +21,8 @@ use arc_swap::ArcSwap;
use auth::UserInfoRef;
use common_catalog::build_db_string;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_time::TimeZone;
use common_time::timezone::get_timezone;
use common_time::Timezone;
use context::QueryContextBuilder;
use crate::context::{Channel, ConnInfo, QueryContextRef};
@@ -33,7 +34,7 @@ pub struct Session {
schema: ArcSwap<String>,
user_info: ArcSwap<UserInfoRef>,
conn_info: ConnInfo,
time_zone: ArcSwap<Option<TimeZone>>,
timezone: ArcSwap<Timezone>,
}
pub type SessionRef = Arc<Session>;
@@ -45,7 +46,7 @@ impl Session {
schema: ArcSwap::new(Arc::new(DEFAULT_SCHEMA_NAME.into())),
user_info: ArcSwap::new(Arc::new(auth::userinfo_by_name(None))),
conn_info: ConnInfo::new(addr, channel),
time_zone: ArcSwap::new(Arc::new(None)),
timezone: ArcSwap::new(Arc::new(get_timezone(None))),
}
}
@@ -58,7 +59,7 @@ impl Session {
.current_catalog(self.catalog.load().to_string())
.current_schema(self.schema.load().to_string())
.sql_dialect(self.conn_info.channel.dialect())
.time_zone((**self.time_zone.load()).clone())
.timezone((**self.timezone.load()).clone())
.build()
}
@@ -73,13 +74,13 @@ impl Session {
}
#[inline]
pub fn time_zone(&self) -> Option<TimeZone> {
self.time_zone.load().as_ref().clone()
pub fn timezone(&self) -> Timezone {
self.timezone.load().as_ref().clone()
}
#[inline]
pub fn set_time_zone(&self, tz: Option<TimeZone>) {
let _ = self.time_zone.swap(Arc::new(tz));
pub fn set_timezone(&self, tz: Timezone) {
let _ = self.timezone.swap(Arc::new(tz));
}
#[inline]

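In the session change above, the timezone is no longer optional: it is initialized from the configured default and swapped atomically when the client issues SET time_zone. A hedged, self-contained sketch of that pattern using arc_swap and chrono-tz; SessionTz is a simplified stand-in for the real Session type:

use std::sync::Arc;

use arc_swap::ArcSwap;
use chrono_tz::Tz;

// Simplified per-connection state: the timezone always holds a concrete value
// and can be replaced lock-free from the thread handling the connection.
struct SessionTz {
    timezone: ArcSwap<Tz>,
}

impl SessionTz {
    fn new(default_tz: Tz) -> Self {
        Self { timezone: ArcSwap::new(Arc::new(default_tz)) }
    }

    // Counterpart of set_timezone: swap in the new value.
    fn set_timezone(&self, tz: Tz) {
        let _ = self.timezone.swap(Arc::new(tz));
    }

    // Counterpart of timezone(): copy the current value out for a query context.
    fn timezone(&self) -> Tz {
        **self.timezone.load()
    }
}

fn main() {
    let session = SessionTz::new(chrono_tz::UTC);
    session.set_timezone("Asia/Shanghai".parse().unwrap());
    assert_eq!(session.timezone().name(), "Asia/Shanghai");
}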

@@ -521,6 +521,7 @@ mod tests {
use api::v1::ColumnDataType;
use common_time::timestamp::TimeUnit;
use common_time::timezone::set_default_timezone;
use datatypes::types::BooleanType;
use datatypes::value::OrderedFloat;
@@ -696,7 +697,7 @@ mod tests {
#[test]
pub fn test_parse_datetime_literal() {
std::env::set_var("TZ", "Asia/Shanghai");
set_default_timezone(Some("Asia/Shanghai")).unwrap();
let value = sql_value_to_value(
"datetime_col",
&ConcreteDataType::datetime_datatype(),

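The tests above stop relying on the TZ environment variable and instead install an explicit process-wide default timezone. A minimal sketch of such a fallible, set-once default; set_default_timezone and get_timezone mirror the names imported in the diff, but the bodies here are an illustrative sketch built on std's OnceLock and chrono-tz, not the common_time implementation:

use std::sync::OnceLock;

use chrono_tz::Tz;

static DEFAULT_TZ: OnceLock<Tz> = OnceLock::new();

// Parse and install the process-wide default exactly once; a second call fails,
// which keeps tests deterministic compared to mutating the TZ env var.
fn set_default_timezone(tz_name: Option<&str>) -> Result<(), String> {
    let tz: Tz = tz_name
        .unwrap_or("UTC")
        .parse()
        .map_err(|_| format!("invalid timezone: {tz_name:?}"))?;
    DEFAULT_TZ
        .set(tz)
        .map_err(|_| "default timezone already set".to_string())
}

// Resolve an optional per-session override against the default (UTC if unset).
fn get_timezone(overridden: Option<Tz>) -> Tz {
    overridden.unwrap_or_else(|| *DEFAULT_TZ.get().unwrap_or(&chrono_tz::UTC))
}

fn main() {
    set_default_timezone(Some("Asia/Shanghai")).unwrap();
    assert_eq!(get_timezone(None).name(), "Asia/Shanghai");
}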

@@ -79,9 +79,9 @@ impl From<PbGrantedRegion> for GrantedRegion {
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum RegionRole {
// Readonly region(mito2), Readonly region(file).
// Readonly region(mito2)
Follower,
// Writable region(mito2).
// Writable region(mito2), Readonly region(file).
Leader,
}


@@ -521,7 +521,7 @@ CREATE TABLE {table_name} (
.unwrap()
.into_inner();
let region_to_dn_map = region_distribution(&table_route_value.region_routes)
let region_to_dn_map = region_distribution(table_route_value.region_routes())
.unwrap()
.iter()
.map(|(k, v)| (v[0], *k))


@@ -216,7 +216,7 @@ mod tests {
.unwrap()
.into_inner();
let region_to_dn_map = region_distribution(&table_route_value.region_routes)
let region_to_dn_map = region_distribution(table_route_value.region_routes())
.unwrap()
.iter()
.map(|(k, v)| (v[0], *k))

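Both call sites above move from borrowing the region_routes field to calling the region_routes() accessor on the table route value. As context for what the tests compute afterwards, a hedged sketch of a region-distribution helper and the inversion into a region-to-datanode map; RegionRoute here is a simplified stand-in for the real metadata types:

use std::collections::BTreeMap;

// Simplified route entry: which datanode currently leads which region.
struct RegionRoute {
    region_id: u64,
    leader_peer: u64,
}

// Group region ids by their leader datanode, analogous to
// region_distribution(table_route_value.region_routes()).
fn region_distribution(routes: &[RegionRoute]) -> BTreeMap<u64, Vec<u64>> {
    let mut map: BTreeMap<u64, Vec<u64>> = BTreeMap::new();
    for route in routes {
        map.entry(route.leader_peer).or_default().push(route.region_id);
    }
    map
}

fn main() {
    let routes = vec![
        RegionRoute { region_id: 1, leader_peer: 10 },
        RegionRoute { region_id: 2, leader_peer: 10 },
        RegionRoute { region_id: 3, leader_peer: 11 },
    ];
    // Invert to region -> datanode, as the tests do with .map(|(k, v)| (v[0], *k)).
    let region_to_dn: BTreeMap<u64, u64> = region_distribution(&routes)
        .iter()
        .map(|(dn, regions)| (regions[0], *dn))
        .collect();
    assert_eq!(region_to_dn.get(&1), Some(&10));
}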

@@ -219,8 +219,8 @@ pub async fn test_mysql_timezone(store_type: StorageType) {
.unwrap();
let _ = conn.execute("SET time_zone = 'UTC'").await.unwrap();
let time_zone = conn.fetch_all("SELECT @@time_zone").await.unwrap();
assert_eq!(time_zone[0].get::<String, usize>(0), "UTC");
let timezone = conn.fetch_all("SELECT @@time_zone").await.unwrap();
assert_eq!(timezone[0].get::<String, usize>(0), "UTC");
// test data
let _ = conn