use std::collections::HashMap;

use api::helper::ColumnDataTypeWrapper;
use api::v1::{
    ColumnSchema, PrimaryKeyEncoding as PrimaryKeyEncodingProto, Row, Rows, SemanticType, Value,
    WriteHint,
};
use common_telemetry::{error, info};
use fxhash::FxHashMap;
use snafu::{OptionExt, ResultExt, ensure};
use store_api::codec::PrimaryKeyEncoding;
use store_api::metadata::ColumnMetadata;
use store_api::region_request::{
    AffectedRows, RegionDeleteRequest, RegionPutRequest, RegionRequest,
};
use store_api::storage::{RegionId, TableId};

use crate::engine::MetricEngineInner;
use crate::error::{
    ColumnNotFoundSnafu, CreateDefaultSnafu, ForbiddenPhysicalAlterSnafu, InvalidRequestSnafu,
    LogicalRegionNotFoundSnafu, PhysicalRegionNotFoundSnafu, Result, UnexpectedRequestSnafu,
    UnsupportedRegionRequestSnafu,
};
use crate::metrics::{FORBIDDEN_OPERATION_COUNT, MITO_OPERATION_ELAPSED};
use crate::row_modifier::{RowsIter, TableIdInput};
use crate::utils::to_data_region_id;

impl MetricEngineInner {
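    /// Dispatches a put request: writes to physical regions are forbidden, while
    /// writes to logical regions are forwarded to [Self::put_logical_region].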
    pub async fn put_region(
        &self,
        region_id: RegionId,
        request: RegionPutRequest,
    ) -> Result<AffectedRows> {
        let is_putting_physical_region =
            self.state.read().unwrap().exist_physical_region(region_id);

        if is_putting_physical_region {
            info!(
                "Metric region received put request {request:?} on physical region {region_id:?}"
            );
            FORBIDDEN_OPERATION_COUNT.inc();

            ForbiddenPhysicalAlterSnafu.fail()
        } else {
            self.put_logical_region(region_id, request).await
        }
    }

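    /// Writes a batch of put requests that may target different logical regions.
    ///
    /// Requests are grouped by the physical region that backs each logical region,
    /// merged into one request per physical region, and each merged request is
    /// written to its data region in a single call.
    ///
    /// A sketch of the expected call shape (region ids and requests are
    /// placeholders, not compiled here):
    ///
    /// ```ignore
    /// let requests = vec![
    ///     (logical_region_1, put_request_1),
    ///     (logical_region_2, put_request_2),
    /// ];
    /// let affected_rows = engine_inner.put_regions_batch(requests.into_iter()).await?;
    /// ```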
    pub async fn put_regions_batch(
        &self,
        requests: impl ExactSizeIterator<Item = (RegionId, RegionPutRequest)>,
    ) -> Result<AffectedRows> {
        let len = requests.len();

        if len == 0 {
            return Ok(0);
        }

        let _timer = MITO_OPERATION_ELAPSED
            .with_label_values(&["put_batch"])
            .start_timer();

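        // Fast path: a single request needs no grouping or merging.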
        if len == 1 {
            let (logical_id, req) = requests.into_iter().next().unwrap();
            return self.put_logical_region(logical_id, req).await;
        }

        let mut requests_per_physical: HashMap<RegionId, Vec<(RegionId, RegionPutRequest)>> =
            HashMap::new();
        for (logical_region_id, request) in requests {
            let physical_region_id = self.find_physical_region_id(logical_region_id)?;
            requests_per_physical
                .entry(physical_region_id)
                .or_default()
                .push((logical_region_id, request));
        }

        let mut total_affected_rows: AffectedRows = 0;
        for (physical_region_id, requests) in requests_per_physical {
            let affected_rows = self
                .put_regions_batch_single_physical(physical_region_id, requests)
                .await?;
            total_affected_rows += affected_rows;
        }

        Ok(total_affected_rows)
    }

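    /// Validates and merges all requests that target a single physical region,
    /// then writes the merged request to its data region in one call.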
    async fn put_regions_batch_single_physical(
        &self,
        physical_region_id: RegionId,
        mut requests: Vec<(RegionId, RegionPutRequest)>,
    ) -> Result<AffectedRows> {
        if requests.is_empty() {
            return Ok(0);
        }

        let data_region_id = to_data_region_id(physical_region_id);
        let primary_key_encoding = self.get_primary_key_encoding(data_region_id)?;

        self.validate_batch_requests(physical_region_id, &mut requests)
            .await?;

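        // Merge the validated requests into a single request, as dictated by the
        // region's primary key encoding.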
        let (merged_request, total_affected_rows) = match primary_key_encoding {
            PrimaryKeyEncoding::Sparse => self.merge_sparse_batch(physical_region_id, requests)?,
            PrimaryKeyEncoding::Dense => self.merge_dense_batch(data_region_id, requests)?,
        };

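        // One write call covers every logical region in this batch.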
        self.data_region
            .write_data(data_region_id, RegionRequest::Put(merged_request))
            .await?;

        Ok(total_affected_rows)
    }

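    /// Returns the primary key encoding of the given data region.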
    fn get_primary_key_encoding(&self, data_region_id: RegionId) -> Result<PrimaryKeyEncoding> {
        let state = self.state.read().unwrap();
        state
            .get_primary_key_encoding(data_region_id)
            .context(PhysicalRegionNotFoundSnafu {
                region_id: data_region_id,
            })
    }

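    /// Runs [Self::verify_rows] (with field completion enabled) over every request
    /// in the batch.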
    async fn validate_batch_requests(
        &self,
        physical_region_id: RegionId,
        requests: &mut [(RegionId, RegionPutRequest)],
    ) -> Result<()> {
        for (logical_region_id, request) in requests {
            self.verify_rows(
                *logical_region_id,
                physical_region_id,
                &mut request.rows,
                true,
            )
            .await?;
        }
        Ok(())
    }

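    /// Merges a batch of requests for a sparse-encoded physical region.
    ///
    /// Each request's rows are first rewritten to the physical schema; the merged
    /// request reuses the schema of the first rewritten request and concatenates
    /// all rows, relying on the sparse rewrite producing a uniform schema across
    /// requests.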
    fn merge_sparse_batch(
        &self,
        physical_region_id: RegionId,
        requests: Vec<(RegionId, RegionPutRequest)>,
    ) -> Result<(RegionPutRequest, AffectedRows)> {
        let total_rows: usize = requests.iter().map(|(_, req)| req.rows.rows.len()).sum();
        let mut merged_rows = Vec::with_capacity(total_rows);
        let mut total_affected_rows: AffectedRows = 0;
        let mut output_schema: Option<Vec<ColumnSchema>> = None;
        let mut merged_version: Option<u64> = None;

        for (logical_region_id, mut request) in requests {
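            // Every request in the batch must carry the same partition expr
            // version, if it carries one at all.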
            if let Some(request_version) = request.partition_expr_version {
                if let Some(merged_version) = merged_version {
                    ensure!(
                        merged_version == request_version,
                        InvalidRequestSnafu {
                            region_id: physical_region_id,
                            reason: "inconsistent partition expr version in batch"
                        }
                    );
                } else {
                    merged_version = Some(request_version);
                }
            }
            self.modify_rows(
                physical_region_id,
                logical_region_id.table_id(),
                &mut request.rows,
                PrimaryKeyEncoding::Sparse,
            )?;

            let row_count = request.rows.rows.len();
            total_affected_rows += row_count as AffectedRows;

            if output_schema.is_none() {
                output_schema = Some(request.rows.schema.clone());
            }

            merged_rows.extend(request.rows.rows);
        }

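        // `requests` is non-empty (checked by the caller), so the output schema
        // has been set by the loop above.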
        let schema = output_schema.unwrap();

        let merged_request = RegionPutRequest {
            rows: Rows {
                schema,
                rows: merged_rows,
            },
            hint: Some(WriteHint {
                primary_key_encoding: PrimaryKeyEncodingProto::Sparse.into(),
            }),
            partition_expr_version: merged_version,
        };

        Ok((merged_request, total_affected_rows))
    }

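    /// Merges a batch of requests for a dense-encoded physical region.
    ///
    /// Requests may carry different column sets, so their rows are first aligned
    /// to the union of all request schemas (absent columns become null) before the
    /// row modifier rewrites them for the data region.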
    fn merge_dense_batch(
        &self,
        data_region_id: RegionId,
        requests: Vec<(RegionId, RegionPutRequest)>,
    ) -> Result<(RegionPutRequest, AffectedRows)> {
        let merged_schema = Self::build_union_schema(&requests);

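        // Align every request's rows to the union schema.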
        let (merged_rows, table_ids, merged_version) =
            Self::align_requests_to_schema(requests, &merged_schema)?;

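        // Rewrite the aligned rows using the physical region's column metadata.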
        let final_rows = {
            let state = self.state.read().unwrap();
            let physical_columns = state
                .physical_region_states()
                .get(&data_region_id)
                .with_context(|| PhysicalRegionNotFoundSnafu {
                    region_id: data_region_id,
                })?
                .physical_columns();

            let iter = RowsIter::new(
                Rows {
                    schema: merged_schema,
                    rows: merged_rows,
                },
                physical_columns,
            );

            self.row_modifier.modify_rows(
                iter,
                TableIdInput::Batch(&table_ids),
                PrimaryKeyEncoding::Dense,
            )?
        };

        let merged_request = RegionPutRequest {
            rows: final_rows,
            hint: None,
            partition_expr_version: merged_version,
        };

        Ok((merged_request, table_ids.len() as AffectedRows))
    }

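    /// Builds the union of all request schemas, keyed by column name; the first
    /// occurrence of each column wins.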
    fn build_union_schema(requests: &[(RegionId, RegionPutRequest)]) -> Vec<ColumnSchema> {
        let mut schema_map: HashMap<&str, ColumnSchema> = HashMap::new();
        for (_, request) in requests {
            for col in &request.rows.schema {
                schema_map
                    .entry(col.column_name.as_str())
                    .or_insert_with(|| col.clone());
            }
        }
        schema_map.into_values().collect()
    }

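    /// Reorders every row to match `merged_schema`, filling columns a request does
    /// not carry with nulls. Returns the aligned rows, one table id per row, and
    /// the merged partition expr version.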
    fn align_requests_to_schema(
        requests: Vec<(RegionId, RegionPutRequest)>,
        merged_schema: &[ColumnSchema],
    ) -> Result<(Vec<Row>, Vec<TableId>, Option<u64>)> {
        let total_rows: usize = requests.iter().map(|(_, req)| req.rows.rows.len()).sum();
        let mut merged_rows = Vec::with_capacity(total_rows);
        let mut table_ids = Vec::with_capacity(total_rows);
        let mut merged_version: Option<u64> = None;

        let null_value = Value { value_data: None };

        for (logical_region_id, request) in requests {
            if let Some(request_version) = request.partition_expr_version {
                if let Some(merged_version) = merged_version {
                    ensure!(
                        merged_version == request_version,
                        InvalidRequestSnafu {
                            region_id: logical_region_id,
                            reason: "inconsistent partition expr version in batch"
                        }
                    );
                } else {
                    merged_version = Some(request_version);
                }
            }
            let table_id = logical_region_id.table_id();

            let col_name_to_idx: FxHashMap<&str, usize> = request
                .rows
                .schema
                .iter()
                .enumerate()
                .map(|(idx, col)| (col.column_name.as_str(), idx))
                .collect();

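            // For each column of the merged schema, the index of that column in
            // this request's schema, or `None` if the request does not carry it.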
            let col_mapping: Vec<Option<usize>> = merged_schema
                .iter()
                .map(|merged_col| {
                    col_name_to_idx
                        .get(merged_col.column_name.as_str())
                        .copied()
                })
                .collect();

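            // Move each value into its slot in the merged schema; absent columns
            // are filled with nulls.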
            for mut row in request.rows.rows {
                let mut aligned_values = Vec::with_capacity(merged_schema.len());
                for &opt_idx in &col_mapping {
                    aligned_values.push(match opt_idx {
                        Some(idx) => std::mem::take(&mut row.values[idx]),
                        None => null_value.clone(),
                    });
                }
                merged_rows.push(Row {
                    values: aligned_values,
                });
                table_ids.push(table_id);
            }
        }

        Ok((merged_rows, table_ids, merged_version))
    }

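    /// Looks up the physical region backing the given logical region.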
    fn find_physical_region_id(&self, logical_region_id: RegionId) -> Result<RegionId> {
        let state = self.state.read().unwrap();
        state
            .logical_regions()
            .get(&logical_region_id)
            .copied()
            .context(LogicalRegionNotFoundSnafu {
                region_id: logical_region_id,
            })
    }

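    /// Dispatches a delete request: deletes on physical regions are rejected,
    /// while deletes on logical regions are forwarded to
    /// [Self::delete_logical_region].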
    pub async fn delete_region(
        &self,
        region_id: RegionId,
        request: RegionDeleteRequest,
    ) -> Result<AffectedRows> {
        if self.is_physical_region(region_id) {
            info!(
                "Metric region received delete request {request:?} on physical region {region_id:?}"
            );
            FORBIDDEN_OPERATION_COUNT.inc();

            UnsupportedRegionRequestSnafu {
                request: RegionRequest::Delete(request),
            }
            .fail()
        } else {
            self.delete_logical_region(region_id, request).await
        }
    }

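    /// Verifies and rewrites the rows of a put request against the physical
    /// region's schema, then writes them to the data region.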
    async fn put_logical_region(
        &self,
        logical_region_id: RegionId,
        mut request: RegionPutRequest,
    ) -> Result<AffectedRows> {
        let _timer = MITO_OPERATION_ELAPSED
            .with_label_values(&["put"])
            .start_timer();

        let (physical_region_id, data_region_id, primary_key_encoding) =
            self.find_data_region_meta(logical_region_id)?;

        self.verify_rows(
            logical_region_id,
            physical_region_id,
            &mut request.rows,
            true,
        )
        .await?;

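        // Rewrite the rows to the physical schema; sparse primary key encoding is
        // signalled to the data region via a write hint.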
        self.modify_rows(
            physical_region_id,
            logical_region_id.table_id(),
            &mut request.rows,
            primary_key_encoding,
        )?;
        if primary_key_encoding == PrimaryKeyEncoding::Sparse {
            request.hint = Some(WriteHint {
                primary_key_encoding: PrimaryKeyEncodingProto::Sparse.into(),
            });
        }
        self.data_region
            .write_data(data_region_id, RegionRequest::Put(request))
            .await
    }

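    /// Verifies and rewrites the rows of a delete request, then forwards it to the
    /// data region.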
    async fn delete_logical_region(
        &self,
        logical_region_id: RegionId,
        mut request: RegionDeleteRequest,
    ) -> Result<AffectedRows> {
        let _timer = MITO_OPERATION_ELAPSED
            .with_label_values(&["delete"])
            .start_timer();

        let (physical_region_id, data_region_id, primary_key_encoding) =
            self.find_data_region_meta(logical_region_id)?;

        self.verify_rows(
            logical_region_id,
            physical_region_id,
            &mut request.rows,
            false,
        )
        .await?;

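        // Same rewrite as the put path, but without filling missing field columns.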
        self.modify_rows(
            physical_region_id,
            logical_region_id.table_id(),
            &mut request.rows,
            primary_key_encoding,
        )?;
        if primary_key_encoding == PrimaryKeyEncoding::Sparse {
            request.hint = Some(WriteHint {
                primary_key_encoding: PrimaryKeyEncodingProto::Sparse.into(),
            });
        }
        self.data_region
            .write_data(data_region_id, RegionRequest::Delete(request))
            .await
    }

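    /// Resolves the physical region id, data region id, and primary key encoding
    /// of the region backing a logical region.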
    pub(crate) fn find_data_region_meta(
        &self,
        logical_region_id: RegionId,
    ) -> Result<(RegionId, RegionId, PrimaryKeyEncoding)> {
        let state = self.state.read().unwrap();
        let physical_region_id = *state
            .logical_regions()
            .get(&logical_region_id)
            .with_context(|| LogicalRegionNotFoundSnafu {
                region_id: logical_region_id,
            })?;
        let data_region_id = to_data_region_id(physical_region_id);
        let primary_key_encoding = state.get_primary_key_encoding(data_region_id).context(
            PhysicalRegionNotFoundSnafu {
                region_id: data_region_id,
            },
        )?;
        Ok((physical_region_id, data_region_id, primary_key_encoding))
    }

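    /// Verifies a request's rows against the physical region's schema: every
    /// column must exist with matching data type and semantic type, and the time
    /// index column must be present. When `check_fields` is true (the put path),
    /// a missing field column is filled in with its default value.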
    async fn verify_rows(
        &self,
        logical_region_id: RegionId,
        physical_region_id: RegionId,
        rows: &mut Rows,
        check_fields: bool,
    ) -> Result<()> {
        let data_region_id = to_data_region_id(physical_region_id);
        let state = self.state.read().unwrap();
        if !state.is_logical_region_exist(logical_region_id) {
            error!("Trying to write to a nonexistent region {logical_region_id}");
            return LogicalRegionNotFoundSnafu {
                region_id: logical_region_id,
            }
            .fail();
        }

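        // Every column in the request must exist in the physical region with the
        // same data type and semantic type.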
        let physical_state = state
            .physical_region_states()
            .get(&data_region_id)
            .context(PhysicalRegionNotFoundSnafu {
                region_id: data_region_id,
            })?;
        let physical_columns = physical_state.physical_columns();
        for col in &rows.schema {
            let info = physical_columns
                .get(&col.column_name)
                .context(ColumnNotFoundSnafu {
                    name: &col.column_name,
                    region_id: logical_region_id,
                })?;

            ensure!(
                api::helper::is_column_type_value_eq(
                    col.datatype,
                    col.datatype_extension.clone(),
                    &info.column_schema.data_type
                ),
                InvalidRequestSnafu {
                    region_id: logical_region_id,
                    reason: format!(
                        "column {} expects type {:?}, given: {}({})",
                        col.column_name,
                        info.column_schema.data_type,
                        api::v1::ColumnDataType::try_from(col.datatype)
                            .map(|v| v.as_str_name())
                            .unwrap_or("Unknown"),
                        col.datatype,
                    ),
                }
            );

            ensure!(
                api::helper::is_semantic_type_eq(col.semantic_type, info.semantic_type),
                InvalidRequestSnafu {
                    region_id: logical_region_id,
                    reason: format!(
                        "column {} expects semantic type {:?}, given: {}({})",
                        col.column_name,
                        info.semantic_type,
                        api::v1::SemanticType::try_from(col.semantic_type)
                            .map(|v| v.as_str_name())
                            .unwrap_or("Unknown"),
                        col.semantic_type,
                    ),
                }
            );
        }

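        // The time index column is always required.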
        let ts_name = physical_state.time_index_column_name();
        ensure!(
            rows.schema.iter().any(|col| col.column_name == ts_name),
            InvalidRequestSnafu {
                region_id: logical_region_id,
                reason: format!("missing required time index column {ts_name}"),
            }
        );

        if check_fields {
            let field_name = physical_state.field_column_name();
            if !rows.schema.iter().any(|col| col.column_name == field_name) {
                let field_meta =
                    physical_columns
                        .get(field_name)
                        .with_context(|| ColumnNotFoundSnafu {
                            name: field_name,
                            region_id: logical_region_id,
                        })?;
                Self::fill_missing_field_column(logical_region_id, field_name, field_meta, rows)?;
            }
        }

        Ok(())
    }

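    /// Appends the missing field column to the request, filling every row with the
    /// column's default value. Impure defaults (e.g. `now()`) are rejected, and a
    /// column with no usable default yields an invalid-request error.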
    fn fill_missing_field_column(
        logical_region_id: RegionId,
        field_name: &str,
        field_meta: &ColumnMetadata,
        rows: &mut Rows,
    ) -> Result<()> {
        ensure!(
            !field_meta.column_schema.is_default_impure(),
            UnexpectedRequestSnafu {
                reason: format!(
                    "unexpected impure default value with region_id: {logical_region_id}, column: {field_name}, default_value: {:?}",
                    field_meta.column_schema.default_constraint(),
                ),
            }
        );

        let default_value = field_meta
            .column_schema
            .create_default()
            .context(CreateDefaultSnafu {
                region_id: logical_region_id,
                column: field_name,
            })?
            .with_context(|| InvalidRequestSnafu {
                region_id: logical_region_id,
                reason: format!("missing required field column {field_name}"),
            })?;
        let default_value = api::helper::to_grpc_value(default_value);
        let (datatype, datatype_extension) =
            ColumnDataTypeWrapper::try_from(field_meta.column_schema.data_type.clone())
                .map_err(|e| {
                    InvalidRequestSnafu {
                        region_id: logical_region_id,
                        reason: format!(
                            "no protobuf type for field column {field_name} ({:?}): {e}",
                            field_meta.column_schema.data_type
                        ),
                    }
                    .build()
                })?
                .to_parts();

        rows.schema.push(ColumnSchema {
            column_name: field_name.to_string(),
            datatype: datatype as i32,
            semantic_type: SemanticType::Field as i32,
            datatype_extension,
            options: None,
        });

        for row in &mut rows.rows {
            row.values.push(default_value.clone());
        }

        Ok(())
    }

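    /// Rewrites `rows` in place for the data region: the row modifier encodes the
    /// primary key columns according to `encoding`, using the physical region's
    /// column metadata.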
    fn modify_rows(
        &self,
        physical_region_id: RegionId,
        table_id: TableId,
        rows: &mut Rows,
        encoding: PrimaryKeyEncoding,
    ) -> Result<()> {
        let input = std::mem::take(rows);
        let iter = {
            let state = self.state.read().unwrap();
            let physical_columns = state
                .physical_region_states()
                .get(&physical_region_id)
                .with_context(|| PhysicalRegionNotFoundSnafu {
                    region_id: physical_region_id,
                })?
                .physical_columns();
            RowsIter::new(input, physical_columns)
        };
        let output =
            self.row_modifier
                .modify_rows(iter, TableIdInput::Single(table_id), encoding)?;
        *rows = output;
        Ok(())
    }
}

696#[cfg(test)]
697mod tests {
698 use std::collections::HashSet;
699
700 use api::v1::value::ValueData;
701 use api::v1::{ColumnDataType, ColumnSchema as PbColumnSchema};
702 use common_error::ext::ErrorExt;
703 use common_error::status_code::StatusCode;
704 use common_function::utils::partition_expr_version;
705 use common_recordbatch::RecordBatches;
706 use datatypes::prelude::ConcreteDataType;
707 use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema};
708 use datatypes::value::Value as PartitionValue;
709 use partition::expr::col;
710 use store_api::metadata::ColumnMetadata;
711 use store_api::metric_engine_consts::{
712 DATA_SCHEMA_TABLE_ID_COLUMN_NAME, DATA_SCHEMA_TSID_COLUMN_NAME,
713 MEMTABLE_PARTITION_TREE_PRIMARY_KEY_ENCODING,
714 };
715 use store_api::path_utils::table_dir;
716 use store_api::region_engine::RegionEngine;
717 use store_api::region_request::{
718 EnterStagingRequest, RegionRequest, StagingPartitionDirective,
719 };
720 use store_api::storage::ScanRequest;
721 use store_api::storage::consts::PRIMARY_KEY_COLUMN_NAME;
722
723 use super::*;
724 use crate::test_util::{self, TestEnv};
725
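    /// Asserts that a merged request's schema matches the expected primary key
    /// encoding: sparse output carries the encoded primary key column in place of
    /// the tag and internal id columns, while dense output keeps them.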
    fn assert_merged_schema(rows: &Rows, expect_sparse: bool) {
        let column_names: HashSet<String> = rows
            .schema
            .iter()
            .map(|col| col.column_name.clone())
            .collect();

        if expect_sparse {
            assert!(
                column_names.contains(PRIMARY_KEY_COLUMN_NAME),
                "sparse encoding should include primary key column"
            );
            assert!(
                !column_names.contains(DATA_SCHEMA_TABLE_ID_COLUMN_NAME),
                "sparse encoding should not include table id column"
            );
            assert!(
                !column_names.contains(DATA_SCHEMA_TSID_COLUMN_NAME),
                "sparse encoding should not include tsid column"
            );
            assert!(
                !column_names.contains("job"),
                "sparse encoding should not include tag columns"
            );
            assert!(
                !column_names.contains("instance"),
                "sparse encoding should not include tag columns"
            );
        } else {
            assert!(
                !column_names.contains(PRIMARY_KEY_COLUMN_NAME),
                "dense encoding should not include primary key column"
            );
            assert!(
                column_names.contains(DATA_SCHEMA_TABLE_ID_COLUMN_NAME),
                "dense encoding should include table id column"
            );
            assert!(
                column_names.contains(DATA_SCHEMA_TSID_COLUMN_NAME),
                "dense encoding should include tsid column"
            );
            assert!(
                column_names.contains("job"),
                "dense encoding should keep tag columns"
            );
            assert!(
                column_names.contains("instance"),
                "dense encoding should keep tag columns"
            );
        }
    }

    fn job_partition_expr_json() -> String {
        let expr = col("job")
            .gt_eq(PartitionValue::String("job-0".into()))
            .and(col("job").lt(PartitionValue::String("job-9".into())));
        expr.as_json_str().unwrap()
    }

    async fn create_logical_region_with_tags(
        env: &TestEnv,
        physical_region_id: RegionId,
        logical_region_id: RegionId,
        tags: &[&str],
    ) {
        let region_create_request = test_util::create_logical_region_request(
            tags,
            physical_region_id,
            &table_dir("test", logical_region_id.table_id()),
        );
        env.metric()
            .handle_request(
                logical_region_id,
                RegionRequest::Create(region_create_request),
            )
            .await
            .unwrap();
    }

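    /// Creates two logical regions with different tag sets on one physical region,
    /// checks the request merged for the expected encoding, then runs the batch
    /// write and verifies the row count with a scan.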
    async fn run_batch_write_with_schema_variants(
        env: &TestEnv,
        physical_region_id: RegionId,
        options: Vec<(String, String)>,
        expect_sparse: bool,
    ) {
        env.create_physical_region(physical_region_id, &TestEnv::default_table_dir(), options)
            .await;

        let logical_region_1 = env.default_logical_region_id();
        let logical_region_2 = RegionId::new(1024, 1);

        create_logical_region_with_tags(env, physical_region_id, logical_region_1, &["job"]).await;
        create_logical_region_with_tags(
            env,
            physical_region_id,
            logical_region_2,
            &["job", "instance"],
        )
        .await;

        let schema_1 = test_util::row_schema_with_tags(&["job"]);
        let schema_2 = test_util::row_schema_with_tags(&["job", "instance"]);

        let data_region_id = RegionId::new(physical_region_id.table_id(), 2);
        let primary_key_encoding = env
            .metric()
            .inner
            .get_primary_key_encoding(data_region_id)
            .unwrap();
        assert_eq!(
            primary_key_encoding,
            if expect_sparse {
                PrimaryKeyEncoding::Sparse
            } else {
                PrimaryKeyEncoding::Dense
            }
        );

        let build_requests = || {
            let rows_1 = test_util::build_rows(1, 3);
            let rows_2 = test_util::build_rows(2, 2);

            vec![
                (
                    logical_region_1,
                    RegionPutRequest {
                        rows: Rows {
                            schema: schema_1.clone(),
                            rows: rows_1,
                        },
                        hint: None,
                        partition_expr_version: None,
                    },
                ),
                (
                    logical_region_2,
                    RegionPutRequest {
                        rows: Rows {
                            schema: schema_2.clone(),
                            rows: rows_2,
                        },
                        hint: None,
                        partition_expr_version: None,
                    },
                ),
            ]
        };

        let merged_request = if expect_sparse {
            let (merged_request, _) = env
                .metric()
                .inner
                .merge_sparse_batch(physical_region_id, build_requests())
                .unwrap();
            let hint = merged_request
                .hint
                .as_ref()
                .expect("missing sparse write hint");
            assert_eq!(
                hint.primary_key_encoding,
                PrimaryKeyEncodingProto::Sparse as i32
            );
            merged_request
        } else {
            let (merged_request, _) = env
                .metric()
                .inner
                .merge_dense_batch(data_region_id, build_requests())
                .unwrap();
            assert!(merged_request.hint.is_none());
            merged_request
        };

        assert_merged_schema(&merged_request.rows, expect_sparse);

        let affected_rows = env
            .metric()
            .inner
            .put_regions_batch(build_requests().into_iter())
            .await
            .unwrap();
        assert_eq!(affected_rows, 5);

        let request = ScanRequest::default();
        let stream = env
            .mito()
            .scan_to_stream(data_region_id, request)
            .await
            .unwrap();
        let batches = RecordBatches::try_collect(stream).await.unwrap();

        assert_eq!(batches.iter().map(|b| b.num_rows()).sum::<usize>(), 5);
    }

    #[tokio::test]
    async fn test_write_logical_region() {
        let env = TestEnv::new().await;
        env.init_metric_region().await;

        let schema = test_util::row_schema_with_tags(&["job"]);
        let rows = test_util::build_rows(1, 5);
        let request = RegionRequest::Put(RegionPutRequest {
            rows: Rows { schema, rows },
            hint: None,
            partition_expr_version: None,
        });

        let logical_region_id = env.default_logical_region_id();
        let result = env
            .metric()
            .handle_request(logical_region_id, request)
            .await
            .unwrap();
        assert_eq!(result.affected_rows, 5);

        let physical_region_id = env.default_physical_region_id();
        let request = ScanRequest::default();
        let stream = env
            .metric()
            .scan_to_stream(physical_region_id, request)
            .await
            .unwrap();
        let batches = RecordBatches::try_collect(stream).await.unwrap();
        let expected = "\
+-------------------------+----------------+------------+---------------------+-------+
| greptime_timestamp      | greptime_value | __table_id | __tsid              | job   |
+-------------------------+----------------+------------+---------------------+-------+
| 1970-01-01T00:00:00     | 0.0            | 3          | 2955007454552897459 | tag_0 |
| 1970-01-01T00:00:00.001 | 1.0            | 3          | 2955007454552897459 | tag_0 |
| 1970-01-01T00:00:00.002 | 2.0            | 3          | 2955007454552897459 | tag_0 |
| 1970-01-01T00:00:00.003 | 3.0            | 3          | 2955007454552897459 | tag_0 |
| 1970-01-01T00:00:00.004 | 4.0            | 3          | 2955007454552897459 | tag_0 |
+-------------------------+----------------+------------+---------------------+-------+";
        assert_eq!(expected, batches.pretty_print().unwrap(), "physical region");

        let request = ScanRequest::default();
        let stream = env
            .metric()
            .scan_to_stream(logical_region_id, request)
            .await
            .unwrap();
        let batches = RecordBatches::try_collect(stream).await.unwrap();
        let expected = "\
+-------------------------+----------------+-------+
| greptime_timestamp      | greptime_value | job   |
+-------------------------+----------------+-------+
| 1970-01-01T00:00:00     | 0.0            | tag_0 |
| 1970-01-01T00:00:00.001 | 1.0            | tag_0 |
| 1970-01-01T00:00:00.002 | 2.0            | tag_0 |
| 1970-01-01T00:00:00.003 | 3.0            | tag_0 |
| 1970-01-01T00:00:00.004 | 4.0            | tag_0 |
+-------------------------+----------------+-------+";
        assert_eq!(expected, batches.pretty_print().unwrap(), "logical region");
    }

    #[tokio::test]
    async fn test_write_logical_region_row_count() {
        let env = TestEnv::new().await;
        env.init_metric_region().await;
        let engine = env.metric();

        let logical_region_id = env.default_logical_region_id();
        let columns = &["odd", "even", "Ev_En"];
        let alter_request = test_util::alter_logical_region_add_tag_columns(123456, columns);
        engine
            .handle_request(logical_region_id, RegionRequest::Alter(alter_request))
            .await
            .unwrap();

        let schema = test_util::row_schema_with_tags(columns);
        let rows = test_util::build_rows(3, 100);
        let request = RegionRequest::Put(RegionPutRequest {
            rows: Rows { schema, rows },
            hint: None,
            partition_expr_version: None,
        });

        let result = engine
            .handle_request(logical_region_id, request)
            .await
            .unwrap();
        assert_eq!(100, result.affected_rows);
    }

    #[tokio::test]
    async fn test_write_physical_region() {
        let env = TestEnv::new().await;
        env.init_metric_region().await;
        let engine = env.metric();

        let physical_region_id = env.default_physical_region_id();
        let schema = test_util::row_schema_with_tags(&["abc"]);
        let rows = test_util::build_rows(1, 100);
        let request = RegionRequest::Put(RegionPutRequest {
            rows: Rows { schema, rows },
            hint: None,
            partition_expr_version: None,
        });

        engine
            .handle_request(physical_region_id, request)
            .await
            .unwrap_err();
    }

    #[tokio::test]
    async fn test_write_nonexist_logical_region() {
        let env = TestEnv::new().await;
        env.init_metric_region().await;
        let engine = env.metric();

        let logical_region_id = RegionId::new(175, 8345);
        let schema = test_util::row_schema_with_tags(&["def"]);
        let rows = test_util::build_rows(1, 100);
        let request = RegionRequest::Put(RegionPutRequest {
            rows: Rows { schema, rows },
            hint: None,
            partition_expr_version: None,
        });

        engine
            .handle_request(logical_region_id, request)
            .await
            .unwrap_err();
    }

    #[tokio::test]
    async fn test_batch_write_multiple_logical_regions() {
        let env = TestEnv::new().await;
        env.init_metric_region().await;
        let engine = env.metric();

        let physical_region_id = env.default_physical_region_id();
        let logical_region_1 = env.default_logical_region_id();
        let logical_region_2 = RegionId::new(1024, 1);
        let logical_region_3 = RegionId::new(1024, 2);

        env.create_logical_region(physical_region_id, logical_region_2)
            .await;
        env.create_logical_region(physical_region_id, logical_region_3)
            .await;

        let schema = test_util::row_schema_with_tags(&["job"]);

        let rows1 = test_util::build_rows(1, 3);
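        // Shift the timestamps of the second and third batches so the merged rows
        // stay distinct.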
        let mut rows2 = test_util::build_rows(1, 2);
        let mut rows3 = test_util::build_rows(1, 5);

        for (i, row) in rows2.iter_mut().enumerate() {
            if let Some(ValueData::TimestampMillisecondValue(ts)) =
                row.values.get_mut(0).and_then(|v| v.value_data.as_mut())
            {
                *ts = (10 + i) as i64;
            }
        }
        for (i, row) in rows3.iter_mut().enumerate() {
            if let Some(ValueData::TimestampMillisecondValue(ts)) =
                row.values.get_mut(0).and_then(|v| v.value_data.as_mut())
            {
                *ts = (20 + i) as i64;
            }
        }

        let requests = vec![
            (
                logical_region_1,
                RegionPutRequest {
                    rows: Rows {
                        schema: schema.clone(),
                        rows: rows1,
                    },
                    hint: None,
                    partition_expr_version: None,
                },
            ),
            (
                logical_region_2,
                RegionPutRequest {
                    rows: Rows {
                        schema: schema.clone(),
                        rows: rows2,
                    },
                    hint: None,
                    partition_expr_version: None,
                },
            ),
            (
                logical_region_3,
                RegionPutRequest {
                    rows: Rows {
                        schema: schema.clone(),
                        rows: rows3,
                    },
                    hint: None,
                    partition_expr_version: None,
                },
            ),
        ];

        let affected_rows = engine
            .inner
            .put_regions_batch(requests.into_iter())
            .await
            .unwrap();
        assert_eq!(affected_rows, 10);

        let request = ScanRequest::default();
        let stream = env
            .metric()
            .scan_to_stream(physical_region_id, request)
            .await
            .unwrap();
        let batches = RecordBatches::try_collect(stream).await.unwrap();

        assert_eq!(batches.iter().map(|b| b.num_rows()).sum::<usize>(), 10);
    }

    #[tokio::test]
    async fn test_batch_write_with_partial_failure() {
        let env = TestEnv::new().await;
        env.init_metric_region().await;
        let engine = env.metric();

        let physical_region_id = env.default_physical_region_id();
        let logical_region_1 = env.default_logical_region_id();
        let logical_region_2 = RegionId::new(1024, 1);
        let nonexistent_region = RegionId::new(9999, 9999);

        env.create_logical_region(physical_region_id, logical_region_2)
            .await;

        let schema = test_util::row_schema_with_tags(&["job"]);
        let requests = vec![
            (
                logical_region_1,
                RegionPutRequest {
                    rows: Rows {
                        schema: schema.clone(),
                        rows: test_util::build_rows(1, 3),
                    },
                    hint: None,
                    partition_expr_version: None,
                },
            ),
            (
                nonexistent_region,
                RegionPutRequest {
                    rows: Rows {
                        schema: schema.clone(),
                        rows: test_util::build_rows(1, 2),
                    },
                    hint: None,
                    partition_expr_version: None,
                },
            ),
            (
                logical_region_2,
                RegionPutRequest {
                    rows: Rows {
                        schema: schema.clone(),
                        rows: test_util::build_rows(1, 5),
                    },
                    hint: None,
                    partition_expr_version: None,
                },
            ),
        ];

        let result = engine.inner.put_regions_batch(requests.into_iter()).await;
        assert!(result.is_err());

        let request = ScanRequest::default();
        let stream = env
            .metric()
            .scan_to_stream(physical_region_id, request)
            .await
            .unwrap();
        let batches = RecordBatches::try_collect(stream).await.unwrap();

        assert_eq!(batches.iter().map(|b| b.num_rows()).sum::<usize>(), 0);
    }

    #[tokio::test]
    async fn test_batch_write_single_request_fast_path() {
        let env = TestEnv::new().await;
        env.init_metric_region().await;
        let engine = env.metric();

        let logical_region_id = env.default_logical_region_id();
        let schema = test_util::row_schema_with_tags(&["job"]);

        let requests = vec![(
            logical_region_id,
            RegionPutRequest {
                rows: Rows {
                    schema,
                    rows: test_util::build_rows(1, 5),
                },
                hint: None,
                partition_expr_version: None,
            },
        )];

        let affected_rows = engine
            .inner
            .put_regions_batch(requests.into_iter())
            .await
            .unwrap();
        assert_eq!(affected_rows, 5);
    }

    #[tokio::test]
    async fn test_batch_write_empty_requests() {
        let env = TestEnv::new().await;
        env.init_metric_region().await;
        let engine = env.metric();

        let requests = vec![];
        let affected_rows = engine
            .inner
            .put_regions_batch(requests.into_iter())
            .await
            .unwrap();

        assert_eq!(affected_rows, 0);
    }

    #[tokio::test]
    async fn test_batch_write_sparse_encoding() {
        let env = TestEnv::new().await;
        let physical_region_id = env.default_physical_region_id();

        run_batch_write_with_schema_variants(
            &env,
            physical_region_id,
            vec![(
                MEMTABLE_PARTITION_TREE_PRIMARY_KEY_ENCODING.to_string(),
                "sparse".to_string(),
            )],
            true,
        )
        .await;
    }

    #[tokio::test]
    async fn test_batch_write_dense_encoding() {
        let env = TestEnv::new().await;
        let physical_region_id = env.default_physical_region_id();

        run_batch_write_with_schema_variants(
            &env,
            physical_region_id,
            vec![(
                MEMTABLE_PARTITION_TREE_PRIMARY_KEY_ENCODING.to_string(),
                "dense".to_string(),
            )],
            false,
        )
        .await;
    }

    #[tokio::test]
    async fn test_metric_put_rejects_bad_partition_expr_version() {
        let env = TestEnv::new().await;
        env.init_metric_region().await;

        let logical_region_id = env.default_logical_region_id();
        let rows = Rows {
            schema: test_util::row_schema_with_tags(&["job"]),
            rows: test_util::build_rows(1, 3),
        };

        let err = env
            .metric()
            .handle_request(
                logical_region_id,
                RegionRequest::Put(RegionPutRequest {
                    rows,
                    hint: None,
                    partition_expr_version: Some(1),
                }),
            )
            .await
            .unwrap_err();

        assert_eq!(err.status_code(), StatusCode::InvalidArguments);
    }

    #[tokio::test]
    async fn test_metric_put_respects_staging_partition_expr_version() {
        let env = TestEnv::new().await;
        env.init_metric_region().await;

        let logical_region_id = env.default_logical_region_id();
        let physical_region_id = env.default_physical_region_id();
        let partition_expr = job_partition_expr_json();
        env.metric()
            .handle_request(
                physical_region_id,
                RegionRequest::EnterStaging(EnterStagingRequest {
                    partition_directive: StagingPartitionDirective::UpdatePartitionExpr(
                        partition_expr.clone(),
                    ),
                }),
            )
            .await
            .unwrap();

        let expected_version = partition_expr_version(Some(&partition_expr));
        let rows = Rows {
            schema: test_util::row_schema_with_tags(&["job"]),
            rows: test_util::build_rows(1, 3),
        };

        let err = env
            .metric()
            .handle_request(
                logical_region_id,
                RegionRequest::Put(RegionPutRequest {
                    rows: rows.clone(),
                    hint: None,
                    partition_expr_version: Some(expected_version.wrapping_add(1)),
                }),
            )
            .await
            .unwrap_err();
        assert_eq!(err.status_code(), StatusCode::InvalidArguments);

        let response = env
            .metric()
            .handle_request(
                logical_region_id,
                RegionRequest::Put(RegionPutRequest {
                    rows: rows.clone(),
                    hint: None,
                    partition_expr_version: None,
                }),
            )
            .await
            .unwrap();
        assert_eq!(response.affected_rows, 3);

        let response = env
            .metric()
            .handle_request(
                logical_region_id,
                RegionRequest::Put(RegionPutRequest {
                    rows,
                    hint: None,
                    partition_expr_version: Some(expected_version),
                }),
            )
            .await
            .unwrap();
        assert_eq!(response.affected_rows, 3);
    }

    #[tokio::test]
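    // A request whose time index column carries the wrong data type must be
    // rejected with InvalidArguments.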
    async fn test_verify_rows_rejects_wrong_type() {
        use api::v1::value::ValueData;
        use api::v1::{ColumnDataType, ColumnSchema as PbColumnSchema, SemanticType};
        use common_query::prelude::{greptime_timestamp, greptime_value};

        let env = TestEnv::new().await;
        env.init_metric_region().await;

        let logical_region_id = env.default_logical_region_id();

        let schema = vec![
            PbColumnSchema {
                column_name: greptime_timestamp().to_string(),
                datatype: ColumnDataType::String as i32,
                semantic_type: SemanticType::Timestamp as _,
                datatype_extension: None,
                options: None,
            },
            PbColumnSchema {
                column_name: greptime_value().to_string(),
                datatype: ColumnDataType::Float64 as i32,
                semantic_type: SemanticType::Field as _,
                datatype_extension: None,
                options: None,
            },
            PbColumnSchema {
                column_name: "job".to_string(),
                datatype: ColumnDataType::String as i32,
                semantic_type: SemanticType::Tag as _,
                datatype_extension: None,
                options: None,
            },
        ];
        let rows = vec![Row {
            values: vec![
                Value {
                    value_data: Some(ValueData::StringValue("not-a-timestamp".to_string())),
                },
                Value {
                    value_data: Some(ValueData::F64Value(1.0)),
                },
                Value {
                    value_data: Some(ValueData::StringValue("tag_0".to_string())),
                },
            ],
        }];

        let err = env
            .metric()
            .handle_request(
                logical_region_id,
                RegionRequest::Put(RegionPutRequest {
                    rows: Rows { schema, rows },
                    hint: None,
                    partition_expr_version: None,
                }),
            )
            .await
            .unwrap_err();
        assert_eq!(err.status_code(), StatusCode::InvalidArguments);
    }

    #[tokio::test]
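    // A request that lacks the time index column must be rejected.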
    async fn test_verify_rows_rejects_missing_time_index() {
        use api::v1::{ColumnDataType, ColumnSchema as PbColumnSchema, SemanticType};
        use common_query::prelude::greptime_value;

        let env = TestEnv::new().await;
        env.init_metric_region().await;

        let logical_region_id = env.default_logical_region_id();

        let schema = vec![
            PbColumnSchema {
                column_name: greptime_value().to_string(),
                datatype: ColumnDataType::Float64 as i32,
                semantic_type: SemanticType::Field as _,
                datatype_extension: None,
                options: None,
            },
            PbColumnSchema {
                column_name: "job".to_string(),
                datatype: ColumnDataType::String as i32,
                semantic_type: SemanticType::Tag as _,
                datatype_extension: None,
                options: None,
            },
        ];
        let rows = vec![Row {
            values: vec![
                Value {
                    value_data: Some(api::v1::value::ValueData::F64Value(1.0)),
                },
                Value {
                    value_data: Some(api::v1::value::ValueData::StringValue("tag_0".to_string())),
                },
            ],
        }];

        let err = env
            .metric()
            .handle_request(
                logical_region_id,
                RegionRequest::Put(RegionPutRequest {
                    rows: Rows { schema, rows },
                    hint: None,
                    partition_expr_version: None,
                }),
            )
            .await
            .unwrap_err();
        assert_eq!(err.status_code(), StatusCode::InvalidArguments);
    }

    #[tokio::test]
    async fn test_verify_rows_rejects_missing_field() {
        use api::v1::value::ValueData;
        use api::v1::{ColumnDataType, ColumnSchema as PbColumnSchema, SemanticType};
        use common_query::prelude::greptime_timestamp;

        let env = TestEnv::new().await;
        env.init_metric_region().await;

        let logical_region_id = env.default_logical_region_id();

        let schema = vec![
            PbColumnSchema {
                column_name: greptime_timestamp().to_string(),
                datatype: ColumnDataType::TimestampMillisecond as i32,
                semantic_type: SemanticType::Timestamp as _,
                datatype_extension: None,
                options: None,
            },
            PbColumnSchema {
                column_name: "job".to_string(),
                datatype: ColumnDataType::String as i32,
                semantic_type: SemanticType::Tag as _,
                datatype_extension: None,
                options: None,
            },
        ];
        let rows = vec![Row {
            values: vec![
                Value {
                    value_data: Some(ValueData::TimestampMillisecondValue(0)),
                },
                Value {
                    value_data: Some(ValueData::StringValue("tag_0".to_string())),
                },
            ],
        }];

        let err = env
            .metric()
            .handle_request(
                logical_region_id,
                RegionRequest::Put(RegionPutRequest {
                    rows: Rows { schema, rows },
                    hint: None,
                    partition_expr_version: None,
                }),
            )
            .await
            .unwrap_err();
        let message = err.to_string();
        assert!(
            message.contains("missing required field column"),
            "expected field-completeness rejection, got: {message}"
        );
        assert_eq!(err.status_code(), StatusCode::InvalidArguments);
    }

    #[test]
    fn test_fill_missing_field_column_nullable_no_default() {
        let field_meta = ColumnMetadata {
            column_id: 1,
            semantic_type: SemanticType::Field,
            column_schema: ColumnSchema::new(
                "greptime_value".to_string(),
                ConcreteDataType::float64_datatype(),
                true,
            ),
        };
        let mut rows = Rows {
            schema: vec![PbColumnSchema {
                column_name: "ts".to_string(),
                datatype: ColumnDataType::TimestampMillisecond as i32,
                semantic_type: SemanticType::Timestamp as _,
                datatype_extension: None,
                options: None,
            }],
            rows: vec![Row {
                values: vec![Value {
                    value_data: Some(ValueData::TimestampMillisecondValue(0)),
                }],
            }],
        };

        MetricEngineInner::fill_missing_field_column(
            RegionId::new(1, 1),
            "greptime_value",
            &field_meta,
            &mut rows,
        )
        .unwrap();

        assert_eq!(rows.schema.len(), 2);
        assert_eq!(rows.schema[1].column_name, "greptime_value");
        assert_eq!(rows.rows[0].values.len(), 2);
        assert!(
            rows.rows[0].values[1].value_data.is_none(),
            "missing nullable field should be filled with null"
        );
    }

    #[test]
    fn test_fill_missing_field_column_rejects_impure_default() {
        let field_meta = ColumnMetadata {
            column_id: 1,
            semantic_type: SemanticType::Field,
            column_schema: ColumnSchema::new(
                "greptime_value".to_string(),
                ConcreteDataType::timestamp_millisecond_datatype(),
                false,
            )
            .with_default_constraint(Some(ColumnDefaultConstraint::Function("now()".to_string())))
            .unwrap(),
        };
        let mut rows = Rows {
            schema: vec![PbColumnSchema {
                column_name: "ts".to_string(),
                datatype: api::v1::ColumnDataType::TimestampMillisecond as i32,
                semantic_type: SemanticType::Timestamp as _,
                datatype_extension: None,
                options: None,
            }],
            rows: vec![Row {
                values: vec![Value {
                    value_data: Some(ValueData::TimestampMillisecondValue(0)),
                }],
            }],
        };

        let err = MetricEngineInner::fill_missing_field_column(
            RegionId::new(1, 1),
            "greptime_value",
            &field_meta,
            &mut rows,
        )
        .unwrap_err();
        assert!(
            err.to_string().contains("impure default value"),
            "expected impure-default rejection, got: {err}"
        );
    }
}