diff --git a/src/flow/src/adapter/node_context.rs b/src/flow/src/adapter/node_context.rs index 5c644803ec..efa3796fd2 100644 --- a/src/flow/src/adapter/node_context.rs +++ b/src/flow/src/adapter/node_context.rs @@ -336,7 +336,7 @@ impl FlownodeContext { let (known_table_name, schema) = srv_map.get_table_name_schema(&table_id).await?; table_name = table_name.or(Some(known_table_name)); self.schema.insert(global_id, schema); - } // if we don't have table id, it means database havn't assign one yet or we don't need it + } // if we don't have table id, it means the database hasn't assigned one yet or we don't need it // still update the mapping with new global id self.table_repr.insert(table_name, table_id, global_id); diff --git a/src/flow/src/adapter/table_source.rs b/src/flow/src/adapter/table_source.rs index 7981999f0a..e067b273ea 100644 --- a/src/flow/src/adapter/table_source.rs +++ b/src/flow/src/adapter/table_source.rs @@ -62,7 +62,7 @@ impl TableSource { .map(|id| id.table_id()) } - /// If the table havn't been created in database, the tableId returned would be null + /// If the table hasn't been created in the database, the tableId returned would be null pub async fn get_table_id_from_name(&self, name: &TableName) -> Result, Error> { let ret = self .table_name_manager diff --git a/src/flow/src/df_optimizer.rs b/src/flow/src/df_optimizer.rs index a6f6092749..76eb16ef38 100644 --- a/src/flow/src/df_optimizer.rs +++ b/src/flow/src/df_optimizer.rs @@ -492,7 +492,7 @@ impl ScalarUDFImpl for TumbleExpand { if let Some(start_time) = opt{ if !matches!(start_time, Utf8 | Date32 | Date64 | Timestamp(_, _)){ return Err(DataFusionError::Plan( - format!("Expect start_time to either be date, timestampe or string, found {:?}", start_time) + format!("Expect start_time to either be date, timestamp or string, found {:?}", start_time) )); } } diff --git a/src/flow/src/repr.rs b/src/flow/src/repr.rs index 50f2a78ef8..f508e4adae 100644 --- a/src/flow/src/repr.rs +++ b/src/flow/src/repr.rs
@@ -57,7 +57,7 @@ pub const BROADCAST_CAP: usize = 1024; /// The maximum capacity of the send buffer, to prevent the buffer from growing too large pub const SEND_BUF_CAP: usize = BROADCAST_CAP * 2; -/// Flow worker will try to at least accumulate this many rows before processing them(if one second havn't passed) +/// Flow worker will try to at least accumulate this many rows before processing them (if one second hasn't passed) pub const BATCH_SIZE: usize = 32 * 16384; /// Convert a value that is or can be converted to Datetime to internal timestamp