diff --git a/src/common/meta/src/rpc/router.rs b/src/common/meta/src/rpc/router.rs index 1b463023a8..a528e605b4 100644 --- a/src/common/meta/src/rpc/router.rs +++ b/src/common/meta/src/rpc/router.rs @@ -148,7 +148,7 @@ impl TableRoute { pub fn new(table: Table, region_routes: Vec<RegionRoute>) -> Self { let region_leaders = region_routes .iter() - .map(|x| (x.region.id as RegionNumber, x.leader_peer.clone())) + .map(|x| (x.region.id.region_number(), x.leader_peer.clone())) .collect::<HashMap<_, _>>(); Self { @@ -265,13 +265,13 @@ impl TableRoute { .collect() } - pub fn find_leader_regions(&self, datanode: &Peer) -> Vec<u32> { + pub fn find_leader_regions(&self, datanode: &Peer) -> Vec<RegionNumber> { self.region_routes .iter() .filter_map(|x| { if let Some(peer) = &x.leader_peer { if peer == datanode { - return Some(x.region.id as u32); + return Some(x.region.id.region_number()); } } None @@ -340,7 +340,7 @@ pub struct Region { impl From<PbRegion> for Region { fn from(r: PbRegion) -> Self { Self { - id: r.id, + id: r.id.into(), name: r.name, partition: r.partition.map(Into::into), attrs: r.attrs, @@ -351,7 +351,7 @@ impl From<PbRegion> for Region { impl From<Region> for PbRegion { fn from(region: Region) -> Self { Self { - id: region.id, + id: region.id.into(), name: region.name, partition: region.partition.map(Into::into), attrs: region.attrs, @@ -668,7 +668,7 @@ mod tests { region_routes: vec![ RegionRoute { region: Region { - id: 1, + id: 1.into(), name: "r1".to_string(), partition: None, attrs: HashMap::new(), @@ -678,7 +678,7 @@ }, RegionRoute { region: Region { - id: 2, + id: 2.into(), name: "r2".to_string(), partition: None, attrs: HashMap::new(), diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs index 7bb18d3589..c819a76c22 100644 --- a/src/frontend/src/instance/distributed.rs +++ b/src/frontend/src/instance/distributed.rs @@ -58,7 +58,7 @@ use sql::statements::create::{PartitionEntry, Partitions}; use sql::statements::statement::Statement; use sql::statements::{self, sql_value_to_value}; use store_api::storage::RegionNumber; -use table::engine::{self, TableReference}; +use table::engine::TableReference; use table::metadata::{RawTableInfo, RawTableMeta, TableIdent, TableType}; use table::requests::TableOptions; use table::table::AlterContext; @@ -337,7 +337,7 @@ impl DistInstance { for table_route in &route_response.table_routes { let should_send_rpc = table_route.region_routes.iter().any(|route| { if let Some(n) = region_number { - n == engine::region_number(route.region.id) + n == route.region.id.region_number() } else { true } @@ -696,7 +696,7 @@ fn create_partitions_stmt(partitions: Vec) -> Result TableCreator { } // We need to create that region.
- let region_id = region_id(self.data.request.id, *number); + let region_id = RegionId::new(self.data.request.id, *number); let region_desc = RegionDescriptorBuilder::default() .id(region_id) .name(region_name.clone()) diff --git a/src/mito/src/engine/tests.rs b/src/mito/src/engine/tests.rs index 2733bb6d62..96f649bdba 100644 --- a/src/mito/src/engine/tests.rs +++ b/src/mito/src/engine/tests.rs @@ -30,7 +30,6 @@ use storage::region::RegionImpl; use storage::EngineImpl; use store_api::manifest::Manifest; use store_api::storage::{ReadContext, ScanRequest}; -use table::engine::region_id; use table::metadata::TableType; use table::requests::{ AddColumnRequest, AlterKind, DeleteRequest, FlushTableRequest, TableOptions, @@ -525,16 +524,6 @@ async fn test_open_table() { assert_eq!(reopened.manifest().last_version(), 1); } -#[test] -fn test_region_id() { - assert_eq!(1, region_id(0, 1)); - assert_eq!(4294967296, region_id(1, 0)); - assert_eq!(4294967297, region_id(1, 1)); - assert_eq!(4294967396, region_id(1, 100)); - assert_eq!(8589934602, region_id(2, 10)); - assert_eq!(18446744069414584330, region_id(u32::MAX, 10)); -} - fn new_add_columns_req( table_id: TableId, new_tag: &ColumnSchema, diff --git a/src/mito/src/table.rs b/src/mito/src/table.rs index bc5cc16ee7..57c0e012ef 100644 --- a/src/mito/src/table.rs +++ b/src/mito/src/table.rs @@ -338,7 +338,7 @@ impl Table for MitoTable { Ok(regions .values() .map(|region| RegionStat { - region_id: region.id(), + region_id: region.id().into(), disk_usage_bytes: region.disk_usage_bytes(), }) .collect()) diff --git a/src/partition/src/error.rs b/src/partition/src/error.rs index 42ad5719c6..03e7799b54 100644 --- a/src/partition/src/error.rs +++ b/src/partition/src/error.rs @@ -18,7 +18,7 @@ use common_error::prelude::*; use common_query::prelude::Expr; use datafusion_common::ScalarValue; use snafu::{Location, Snafu}; -use store_api::storage::RegionId; +use store_api::storage::{RegionId, RegionNumber}; #[derive(Debug, Snafu)] #[snafu(visibility(pub))] @@ -35,7 +35,7 @@ pub enum Error { #[snafu(display("Failed to find Datanode, table: {} region: {:?}", table, region))] FindDatanode { table: String, - region: RegionId, + region: RegionNumber, location: Location, }, diff --git a/src/partition/src/manager.rs b/src/partition/src/manager.rs index c4189de309..ae898c4e6f 100644 --- a/src/partition/src/manager.rs +++ b/src/partition/src/manager.rs @@ -126,14 +126,14 @@ impl PartitionRuleManager { let mut partitions = Vec::with_capacity(route.region_routes.len()); for r in route.region_routes.iter() { - let partition = r - .region - .partition - .clone() - .context(error::FindRegionRoutesSnafu { - region_id: r.region.id, - table_name: table.to_string(), - })?; + let partition = + r.region + .partition + .clone() + .with_context(|| error::FindRegionRoutesSnafu { + region_id: r.region.id, + table_name: table.to_string(), + })?; let partition_def = PartitionDef::try_from(partition)?; partitions.push(PartitionInfo { @@ -175,7 +175,7 @@ impl PartitionRuleManager { let regions = partitions .iter() - .map(|x| x.id as u32) + .map(|x| x.id.region_number()) .collect::>(); // TODO(LFC): Serializing and deserializing partition rule is ugly, must find a much more elegant way. 
diff --git a/src/storage/benches/memtable/util/regiondesc_util.rs b/src/storage/benches/memtable/util/regiondesc_util.rs index e7b77b92c2..937cccd9e5 100644 --- a/src/storage/benches/memtable/util/regiondesc_util.rs +++ b/src/storage/benches/memtable/util/regiondesc_util.rs @@ -57,7 +57,7 @@ impl RegionDescBuilder { pub fn build(self) -> RegionDescriptor { RegionDescriptor { - id: 0, + id: 0.into(), name: self.name, row_key: self.key_builder.build().unwrap(), default_cf: self.default_cf_builder.build().unwrap(), diff --git a/src/storage/src/compaction.rs b/src/storage/src/compaction.rs index 72152dfde2..3553e4e7c0 100644 --- a/src/storage/src/compaction.rs +++ b/src/storage/src/compaction.rs @@ -142,7 +142,7 @@ mod tests { let layer = Arc::new(crate::test_util::access_layer_util::MockAccessLayer {}); FileHandle::new( FileMeta { - region_id: 0, + region_id: 0.into(), file_id, time_range: Some(( Timestamp::new_millisecond(start_ts_millis), diff --git a/src/storage/src/compaction/noop.rs b/src/storage/src/compaction/noop.rs index 50ea24a590..eae5f49576 100644 --- a/src/storage/src/compaction/noop.rs +++ b/src/storage/src/compaction/noop.rs @@ -68,7 +68,7 @@ impl Request for NoopCompactionRequest { type Key = RegionId; fn key(&self) -> Self::Key { - 0 + RegionId::from(0) } fn complete(self, _result: Result<()>) {} diff --git a/src/storage/src/compaction/writer.rs b/src/storage/src/compaction/writer.rs index 8c73eeacce..4855d15c20 100644 --- a/src/storage/src/compaction/writer.rs +++ b/src/storage/src/compaction/writer.rs @@ -136,7 +136,7 @@ mod tests { use crate::sst::{self, FileId, FileMeta, FsAccessLayer, Source, SstInfo, WriteOptions}; use crate::test_util::descriptor_util::RegionDescBuilder; - const REGION_ID: RegionId = 1; + const REGION_ID: RegionId = RegionId::from_u64(1); fn schema_for_test() -> RegionSchemaRef { // Just build a region desc and use its columns metadata. 
@@ -263,7 +263,7 @@ mod tests { .unwrap(); let handle = FileHandle::new( FileMeta { - region_id: 0, + region_id: 0.into(), file_id: sst_file_id, time_range, level: 0, @@ -530,7 +530,7 @@ mod tests { .map(|f| { FileHandle::new( FileMeta { - region_id: 0, + region_id: 0.into(), file_id: f, level: 1, time_range: None, diff --git a/src/storage/src/file_purger.rs b/src/storage/src/file_purger.rs index 977a0d7179..9309dd0da1 100644 --- a/src/storage/src/file_purger.rs +++ b/src/storage/src/file_purger.rs @@ -155,7 +155,7 @@ mod tests { ( FileHandle::new( FileMeta { - region_id: 0, + region_id: 0.into(), file_id: sst_file_id, time_range: None, level: 0, @@ -185,7 +185,7 @@ mod tests { let (_file, path, layer) = create_sst_file(object_store.clone(), sst_file_id, noop_file_purger).await; let request = FilePurgeRequest { - region_id: 0, + region_id: 0.into(), file_id: sst_file_id, sst_layer: layer, }; diff --git a/src/storage/src/flush.rs b/src/storage/src/flush.rs index 64ddc956cf..873b8015de 100644 --- a/src/storage/src/flush.rs +++ b/src/storage/src/flush.rs @@ -355,13 +355,13 @@ mod tests { assert_eq!(0, strategy.memory_active.load(Ordering::Relaxed)); let status = RegionStatus { - region_id: 1, + region_id: 1.into(), bytes_mutable: 400, write_buffer_size: 300, }; assert_eq!(Some(FlushType::Region), strategy.should_flush(status)); let status = RegionStatus { - region_id: 1, + region_id: 1.into(), bytes_mutable: 100, write_buffer_size: 300, }; @@ -373,7 +373,7 @@ mod tests { let strategy = SizeBasedStrategy::new(Some(1000)); strategy.reserve_mem(500); let status = RegionStatus { - region_id: 1, + region_id: 1.into(), bytes_mutable: 300, write_buffer_size: 500, }; @@ -382,7 +382,7 @@ mod tests { // Flush region. let status = RegionStatus { - region_id: 1, + region_id: 1.into(), bytes_mutable: 400, write_buffer_size: 300, }; @@ -390,7 +390,7 @@ mod tests { // More than mutable limitation, Flush global. let status = RegionStatus { - region_id: 1, + region_id: 1.into(), bytes_mutable: 100, write_buffer_size: 300, }; @@ -415,7 +415,7 @@ mod tests { strategy.schedule_free_mem(200); // More than global limit. let status = RegionStatus { - region_id: 1, + region_id: 1.into(), bytes_mutable: 100, write_buffer_size: 300, }; @@ -424,7 +424,7 @@ mod tests { // More than global limit, but mutable not enough (< 500). 
strategy.schedule_free_mem(450); let status = RegionStatus { - region_id: 1, + region_id: 1.into(), bytes_mutable: 100, write_buffer_size: 300, }; diff --git a/src/storage/src/flush/picker.rs b/src/storage/src/flush/picker.rs index 2e46629dcb..ce3fd424c9 100644 --- a/src/storage/src/flush/picker.rs +++ b/src/storage/src/flush/picker.rs @@ -120,7 +120,7 @@ pub trait FlushItem { #[async_trait] impl FlushItem for RegionImpl { fn item_id(&self) -> u64 { - self.id() + self.id().into() } fn last_flush_time(&self) -> i64 { diff --git a/src/storage/src/manifest/action.rs b/src/storage/src/manifest/action.rs index e020d72c04..ba8bdc12d5 100644 --- a/src/storage/src/manifest/action.rs +++ b/src/storage/src/manifest/action.rs @@ -352,7 +352,7 @@ mod tests { fn mock_file_meta() -> FileMeta { FileMeta { - region_id: 0, + region_id: 0.into(), file_id: FileId::random(), time_range: None, level: 0, diff --git a/src/storage/src/manifest/test_utils.rs b/src/storage/src/manifest/test_utils.rs index 6a13635f5f..8d8b7ef693 100644 --- a/src/storage/src/manifest/test_utils.rs +++ b/src/storage/src/manifest/test_utils.rs @@ -54,7 +54,7 @@ pub fn build_region_edit( files_to_add: files_to_add .iter() .map(|f| FileMeta { - region_id: 0, + region_id: 0.into(), file_id: *f, time_range: None, level: 0, @@ -64,7 +64,7 @@ pub fn build_region_edit( files_to_remove: files_to_remove .iter() .map(|f| FileMeta { - region_id: 0, + region_id: 0.into(), file_id: *f, time_range: None, level: 0, diff --git a/src/storage/src/metadata.rs b/src/storage/src/metadata.rs index 50dfb79358..4f54d8c135 100644 --- a/src/storage/src/metadata.rs +++ b/src/storage/src/metadata.rs @@ -778,7 +778,7 @@ impl Default for RegionMetadataBuilder { impl RegionMetadataBuilder { fn new() -> RegionMetadataBuilder { RegionMetadataBuilder { - id: 0, + id: 0.into(), name: String::new(), columns_meta_builder: ColumnsMetadataBuilder::default(), cfs_meta_builder: ColumnFamiliesMetadataBuilder::default(), diff --git a/src/storage/src/scheduler.rs b/src/storage/src/scheduler.rs index f87e1436a3..d9bb71a7cf 100644 --- a/src/storage/src/scheduler.rs +++ b/src/storage/src/scheduler.rs @@ -377,9 +377,15 @@ mod tests { let handler_cloned = handler.clone(); let _handle = common_runtime::spawn_bg(async move { handler_cloned.run().await }); - let _ = queue.write().unwrap().push_back(1, MockRequest::default()); + let _ = queue + .write() + .unwrap() + .push_back(1.into(), MockRequest::default()); handler.task_notifier.notify_one(); - let _ = queue.write().unwrap().push_back(2, MockRequest::default()); + let _ = queue + .write() + .unwrap() + .push_back(2.into(), MockRequest::default()); handler.task_notifier.notify_one(); tokio::time::timeout(Duration::from_secs(1), latch.wait()) @@ -443,8 +449,16 @@ mod tests { handler, ); - let _ = scheduler.schedule(MockRequest { region_id: 1 }).unwrap(); - let _ = scheduler.schedule(MockRequest { region_id: 2 }).unwrap(); + let _ = scheduler + .schedule(MockRequest { + region_id: 1.into(), + }) + .unwrap(); + let _ = scheduler + .schedule(MockRequest { + region_id: 2.into(), + }) + .unwrap(); tokio::time::timeout(Duration::from_secs(1), latch.wait()) .await @@ -473,7 +487,7 @@ mod tests { for i in 0..task_size { assert!(scheduler .schedule(MockRequest { - region_id: i as RegionId, + region_id: RegionId::from(i as u64), }) .is_ok()); } @@ -504,7 +518,7 @@ mod tests { for i in 0..task_size / 2 { assert!(scheduler .schedule(MockRequest { - region_id: i as RegionId, + region_id: RegionId::from(i as u64), }) .is_ok()); } @@ -513,7 +527,7 
@@ mod tests { for i in task_size / 2..task_size { assert!(scheduler .schedule(MockRequest { - region_id: i as RegionId, + region_id: RegionId::from(i as u64), }) .is_ok()); } @@ -534,7 +548,12 @@ mod tests { let mut scheduled_task = 0; for _ in 0..10 { - if scheduler.schedule(MockRequest { region_id: 1 }).unwrap() { + if scheduler + .schedule(MockRequest { + region_id: 1.into(), + }) + .unwrap() + { scheduled_task += 1; } } @@ -568,7 +587,7 @@ mod tests { let handle = common_runtime::spawn_write(async move { for i in 0..10000 { if let Ok(res) = scheduler_cloned.schedule(MockRequest { - region_id: i as RegionId, + region_id: RegionId::from(i as u64), }) { if res { let _ = task_scheduled_cloned.fetch_add(1, Ordering::Relaxed); diff --git a/src/storage/src/sst.rs b/src/storage/src/sst.rs index dff3c40e5a..3236b456fd 100644 --- a/src/storage/src/sst.rs +++ b/src/storage/src/sst.rs @@ -711,7 +711,7 @@ mod tests { fn create_file_meta(file_id: FileId, level: Level) -> FileMeta { FileMeta { - region_id: 0, + region_id: 0.into(), file_id, time_range: None, level, diff --git a/src/storage/src/sst/parquet.rs b/src/storage/src/sst/parquet.rs index 1f906ecbce..145ee52217 100644 --- a/src/storage/src/sst/parquet.rs +++ b/src/storage/src/sst/parquet.rs @@ -562,7 +562,7 @@ mod tests { let layer = Arc::new(crate::test_util::access_layer_util::MockAccessLayer {}); FileHandle::new( FileMeta { - region_id: 0, + region_id: 0.into(), file_id, time_range: Some(( Timestamp::new_millisecond(0), diff --git a/src/storage/src/test_util/descriptor_util.rs b/src/storage/src/test_util/descriptor_util.rs index 23e92e6532..ad7777dbff 100644 --- a/src/storage/src/test_util/descriptor_util.rs +++ b/src/storage/src/test_util/descriptor_util.rs @@ -19,8 +19,8 @@ use store_api::storage::{ RegionDescriptor, RegionId, RowKeyDescriptorBuilder, }; +use crate::test_util; use crate::test_util::schema_util::ColumnDef; -use crate::test_util::{self}; /// A RegionDescriptor builder for test. 
pub struct RegionDescBuilder { @@ -46,7 +46,7 @@ impl RegionDescBuilder { ); Self { - id: 0, + id: 0.into(), name: name.into(), last_column_id: 1, key_builder, @@ -54,8 +54,8 @@ } } - pub fn id(mut self, id: RegionId) -> Self { - self.id = id; + pub fn id(mut self, id: impl Into<RegionId>) -> Self { + self.id = id.into(); self } diff --git a/src/storage/src/wal.rs b/src/storage/src/wal.rs index 473b5e4600..1aa4dd93da 100644 --- a/src/storage/src/wal.rs +++ b/src/storage/src/wal.rs @@ -56,7 +56,7 @@ impl<S: LogStore> Clone for Wal<S> { impl<S: LogStore> Wal<S> { pub fn new(region_id: RegionId, store: Arc<S>) -> Self { - let namespace = store.namespace(region_id); + let namespace = store.namespace(region_id.into()); Self { region_id, namespace, @@ -268,7 +268,7 @@ mod tests { let log_file_dir_path = log_file_dir.path().to_str().unwrap(); let log_store = test_util::log_store_util::create_tmp_local_file_log_store(log_file_dir_path).await; - let wal = Wal::new(0, Arc::new(log_store)); + let wal = Wal::new(RegionId::from(0), Arc::new(log_store)); let res = wal.write(0, b"test1").await.unwrap(); @@ -284,7 +284,7 @@ mod tests { let log_file_dir_path = log_file_dir.path().to_str().unwrap(); let log_store = test_util::log_store_util::create_tmp_local_file_log_store(log_file_dir_path).await; - let wal = Wal::new(0, Arc::new(log_store)); + let wal = Wal::new(RegionId::from(0), Arc::new(log_store)); let header = WalHeader::with_last_manifest_version(111); let seq_num = 3; let _ = wal.write_to_wal(seq_num, header, None).await?; diff --git a/src/store-api/src/storage/descriptors.rs b/src/store-api/src/storage/descriptors.rs index fce7d36091..efa87ed1de 100644 --- a/src/store-api/src/storage/descriptors.rs +++ b/src/store-api/src/storage/descriptors.rs @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::fmt; + use derive_builder::Builder; use serde::{Deserialize, Serialize}; @@ -21,10 +23,83 @@ use crate::storage::{consts, ColumnDefaultConstraint, ColumnSchema, ConcreteData pub type ColumnId = u32; /// Id of column family, unique in each region. pub type ColumnFamilyId = u32; -/// Id of the region. It's generated by concatenating table id and region number. -pub type RegionId = u64; - +/// Sequence number of regions under the same table. pub type RegionNumber = u32; +/// Id of table. +pub type TableId = u32; + +/// Id of the region. It's generated by concatenating table id and region number. +#[derive(Default, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct RegionId(u64); + +impl RegionId { + /// Construct a new [RegionId] from table id and region number. + pub const fn new(table_id: TableId, region_number: RegionNumber) -> RegionId { + RegionId(((table_id as u64) << 32) | region_number as u64) + } + + /// Returns the table id of the region. + pub const fn table_id(&self) -> TableId { + (self.0 >> 32) as TableId + } + + /// Returns the region number of the region. + pub const fn region_number(&self) -> RegionNumber { + self.0 as RegionNumber + } + + /// Returns the region id as u64. + pub const fn as_u64(&self) -> u64 { + self.0 + } + + /// Construct a new [RegionId] from u64.
+ pub const fn from_u64(id: u64) -> RegionId { + RegionId(id) + } +} + +impl fmt::Debug for RegionId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{}({}, {})", + self.0, + self.table_id(), + self.region_number() + ) + } +} + +impl fmt::Display for RegionId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl From<u64> for RegionId { + fn from(region_id: u64) -> RegionId { + RegionId::from_u64(region_id) + } +} + +impl From<RegionId> for u64 { + fn from(region_id: RegionId) -> u64 { + region_id.as_u64() + } +} + +impl PartialEq<u64> for RegionId { + fn eq(&self, other: &u64) -> bool { + self.0 == *other + } +} + +impl PartialEq<RegionId> for u64 { + fn eq(&self, other: &RegionId) -> bool { + *self == other.0 + } +} /// A [ColumnDescriptor] contains information to create a column. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Builder)] @@ -133,6 +208,7 @@ pub struct ColumnFamilyDescriptor { #[derive(Debug, Clone, PartialEq, Eq, Builder)] #[builder(pattern = "owned")] pub struct RegionDescriptor { + #[builder(setter(into))] pub id: RegionId, /// Region name. #[builder(setter(into))] @@ -305,4 +381,34 @@ mod tests { .unwrap(); assert_eq!(1, desc.columns.len()); } + + #[test] + fn test_region_id() { + assert_eq!(RegionId::new(0, 1), 1); + assert_eq!(4294967296, RegionId::new(1, 0)); + assert_eq!(4294967297, RegionId::new(1, 1)); + assert_eq!(4294967396, RegionId::new(1, 100)); + assert_eq!(8589934602, RegionId::new(2, 10)); + assert_eq!(18446744069414584330, RegionId::new(u32::MAX, 10)); + + let region_id = RegionId::new(u32::MAX, 1); + assert_eq!(u32::MAX, region_id.table_id()); + assert_eq!(1, region_id.region_number()); + let inner: u64 = region_id.into(); + assert_eq!(RegionId::from(inner), region_id); + + let region_id = RegionId::new(1234, 5); + assert_eq!("5299989643269(1234, 5)", region_id.to_string()); + assert_eq!("5299989643269(1234, 5)", format!("{:?}", region_id)); + } + + #[test] + fn test_region_id_to_json() { + let region_id = RegionId::from(4294967297); + let json = serde_json::to_string(&region_id).unwrap(); + assert_eq!("4294967297", json); + + let parsed: RegionId = serde_json::from_str(&json).unwrap(); + assert_eq!(region_id, parsed); + } } diff --git a/src/store-api/src/storage/region.rs b/src/store-api/src/storage/region.rs index c2759858f3..04e8de9688 100644 --- a/src/store-api/src/storage/region.rs +++ b/src/store-api/src/storage/region.rs @@ -79,7 +79,7 @@ pub trait Region: Send + Sync + Clone + std::fmt::Debug + 'static { fn region_stat(&self) -> RegionStat { RegionStat { - region_id: self.id(), + region_id: self.id().into(), disk_usage_bytes: self.disk_usage_bytes(), } } diff --git a/src/table/src/engine.rs b/src/table/src/engine.rs index 5b4115c6b2..b0e8ef2a3e 100644 --- a/src/table/src/engine.rs +++ b/src/table/src/engine.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use common_base::paths::DATA_DIR; use common_procedure::BoxedProcedure; -use store_api::storage::{RegionId, RegionNumber}; +use store_api::storage::RegionNumber; use crate::error::{self, Result}; use crate::metadata::TableId; @@ -172,24 +172,6 @@ pub fn region_name(table_id: TableId, region_number: RegionNumber) -> String { format!("{table_id}_{region_number:010}") } -/// Construct a [RegionId] from specific `table_id` and `region_number`.
-#[inline] -pub fn region_id(table_id: TableId, region_number: RegionNumber) -> RegionId { - (u64::from(table_id) << 32) | u64::from(region_number) -} - -/// Retrieve the table id from specific `region_id`. -#[inline] -pub fn table_id(region_id: RegionId) -> TableId { - (region_id >> 32) as TableId -} - -/// Retrieve the region_number from specific `region_id`. -#[inline] -pub fn region_number(region_id: RegionId) -> RegionNumber { - region_id as RegionNumber -} - #[inline] pub fn table_dir(catalog_name: &str, schema_name: &str, table_id: TableId) -> String { format!("{DATA_DIR}{catalog_name}/{schema_name}/{table_id}/") @@ -209,11 +191,4 @@ mod tests { assert_eq!("greptime.public.test", table_ref.to_string()); } - - #[test] - fn test_table_id() { - let region_id = region_id(u32::MAX, 1); - let table_id = table_id(region_id); - assert_eq!(u32::MAX, table_id); - } }
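
Note on the new RegionId layout (reviewer aid, not part of the patch): the struct added in src/store-api/src/storage/descriptors.rs packs the table id into the high 32 bits of a u64 and the region number into the low 32 bits, which is what the migrated call sites rely on when they swap `region.id as u32` for `region.id.region_number()`. The sketch below is a minimal, standalone mirror of that type for illustration only; it omits the `Serialize`, `Display`, `From`, and `PartialEq` impls the patch adds, and the local type aliases and `main` function exist only for this demo, not in store-api.

// Standalone sketch of the RegionId bit packing introduced above.
type TableId = u32;
type RegionNumber = u32;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct RegionId(u64);

impl RegionId {
    /// Table id goes into the high 32 bits, region number into the low 32 bits.
    const fn new(table_id: TableId, region_number: RegionNumber) -> RegionId {
        RegionId(((table_id as u64) << 32) | region_number as u64)
    }

    /// Recover the table id from the high 32 bits.
    const fn table_id(&self) -> TableId {
        (self.0 >> 32) as TableId
    }

    /// Recover the region number from the low 32 bits (truncating cast).
    const fn region_number(&self) -> RegionNumber {
        self.0 as RegionNumber
    }

    /// Expose the raw u64 for callers that still expect a plain integer.
    const fn as_u64(&self) -> u64 {
        self.0
    }
}

fn main() {
    // 1234 << 32 | 5 == 5_299_989_643_269, matching the Display test above.
    let id = RegionId::new(1234, 5);
    assert_eq!(5_299_989_643_269, id.as_u64());
    assert_eq!(1234, id.table_id());
    assert_eq!(5, id.region_number());
}

Because `Serialize`/`Deserialize` stay derived on the u64 newtype, serde treats it as its inner value, so the JSON form remains a bare number (as the test_region_id_to_json test above shows) and region ids already persisted as plain u64 keep round-tripping unchanged.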