From f7f52592b4fe5044cff7d68daf9db84cc62308c4 Mon Sep 17 00:00:00 2001
From: Ning Sun
Date: Thu, 2 Oct 2025 23:11:09 +0800
Subject: [PATCH] fix: various typos reported by CI (#7047)

* fix: various typos reported by CI

* fix: additional typo

Signed-off-by: WenyXu
---
 src/cli/src/metadata/control/get.rs               |  6 +++---
 src/cli/src/metadata/control/utils.rs             |  2 +-
 src/common/function/src/aggrs/aggr_wrapper.rs     |  4 ++--
 src/common/telemetry/src/tracing_context.rs       |  2 +-
 src/flow/src/plan/join.rs                         |  2 +-
 src/meta-client/src/client.rs                     |  2 +-
 src/meta-srv/src/election/rds/mysql.rs            |  2 +-
 src/meta-srv/src/election/rds/postgres.rs         |  2 +-
 src/mito2/src/manifest/manager.rs                 |  2 +-
 src/promql/src/extension_plan/scalar_calculate.rs |  4 ++--
 src/query/src/dist_plan/planner.rs                |  2 +-
 src/servers/src/http/event.rs                     |  6 +++---
 src/sql/src/statements/truncate.rs                |  2 +-
 tests-integration/tests/http.rs                   | 12 ++++++------
 14 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/src/cli/src/metadata/control/get.rs b/src/cli/src/metadata/control/get.rs
index 38a488b53d..1ea713123c 100644
--- a/src/cli/src/metadata/control/get.rs
+++ b/src/cli/src/metadata/control/get.rs
@@ -29,7 +29,7 @@ use futures::TryStreamExt;
 
 use crate::error::InvalidArgumentsSnafu;
 use crate::metadata::common::StoreConfig;
-use crate::metadata::control::utils::{decode_key_value, get_table_id_by_name, json_fromatter};
+use crate::metadata::control::utils::{decode_key_value, get_table_id_by_name, json_formatter};
 use crate::Tool;
 
 /// Getting metadata from metadata store.
@@ -206,7 +206,7 @@ impl Tool for GetTableTool {
             println!(
                 "{}\n{}",
                 TableInfoKey::new(table_id),
-                json_fromatter(self.pretty, &*table_info)
+                json_formatter(self.pretty, &*table_info)
             );
         } else {
             println!("Table info not found");
@@ -221,7 +221,7 @@ impl Tool for GetTableTool {
             println!(
                 "{}\n{}",
                 TableRouteKey::new(table_id),
-                json_fromatter(self.pretty, &table_route)
+                json_formatter(self.pretty, &table_route)
             );
         } else {
             println!("Table route not found");
diff --git a/src/cli/src/metadata/control/utils.rs b/src/cli/src/metadata/control/utils.rs
index f557cb6be3..d563f310a5 100644
--- a/src/cli/src/metadata/control/utils.rs
+++ b/src/cli/src/metadata/control/utils.rs
@@ -27,7 +27,7 @@ pub fn decode_key_value(kv: KeyValue) -> CommonMetaResult<(String, String)> {
 }
 
 /// Formats a value as a JSON string.
-pub fn json_fromatter<T>(pretty: bool, value: &T) -> String
+pub fn json_formatter<T>(pretty: bool, value: &T) -> String
 where
     T: Serialize,
 {
diff --git a/src/common/function/src/aggrs/aggr_wrapper.rs b/src/common/function/src/aggrs/aggr_wrapper.rs
index 7c97414554..a01bbebd6f 100644
--- a/src/common/function/src/aggrs/aggr_wrapper.rs
+++ b/src/common/function/src/aggrs/aggr_wrapper.rs
@@ -332,7 +332,7 @@ impl AggregateUDFImpl for StateWrapper {
         self.inner.signature()
     }
 
-    /// Coerce types also do nothing, as optimzer should be able to already make struct types
+    /// Coerce types also do nothing, as optimizer should be able to already make struct types
     fn coerce_types(&self, arg_types: &[DataType]) -> datafusion_common::Result<Vec<DataType>> {
         self.inner.coerce_types(arg_types)
     }
@@ -486,7 +486,7 @@ impl AggregateUDFImpl for MergeWrapper {
         &self.merge_signature
     }
 
-    /// Coerce types also do nothing, as optimzer should be able to already make struct types
+    /// Coerce types also do nothing, as optimizer should be able to already make struct types
     fn coerce_types(&self, arg_types: &[DataType]) -> datafusion_common::Result<Vec<DataType>> {
         // just check if the arg_types are only one and is struct array
         if arg_types.len() != 1 || !matches!(arg_types.first(), Some(DataType::Struct(_))) {
diff --git a/src/common/telemetry/src/tracing_context.rs b/src/common/telemetry/src/tracing_context.rs
index bf9c3dd916..fd389bccf5 100644
--- a/src/common/telemetry/src/tracing_context.rs
+++ b/src/common/telemetry/src/tracing_context.rs
@@ -19,7 +19,7 @@ use opentelemetry::propagation::TextMapPropagator;
 use opentelemetry_sdk::propagation::TraceContextPropagator;
 use tracing_opentelemetry::OpenTelemetrySpanExt;
 
-// An wapper for `Futures` that provides tracing instrument adapters.
+// An wrapper for `Futures` that provides tracing instrument adapters.
 pub trait FutureExt: std::future::Future + Sized {
     fn trace(self, span: tracing::span::Span) -> tracing::instrument::Instrumented<Self>;
 }
diff --git a/src/flow/src/plan/join.rs b/src/flow/src/plan/join.rs
index 1a437dd00d..411b00ee02 100644
--- a/src/flow/src/plan/join.rs
+++ b/src/flow/src/plan/join.rs
@@ -71,6 +71,6 @@ pub struct LinearStagePlan {
     /// The key expressions to use for the lookup relation.
     pub lookup_key: Vec,
     /// The closure to apply to the concatenation of the key columns,
-    /// the stream value columns, and the lookup value colunms.
+    /// the stream value columns, and the lookup value columns.
     pub closure: JoinFilter,
 }
diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs
index ebf2a6a167..cf7e953062 100644
--- a/src/meta-client/src/client.rs
+++ b/src/meta-client/src/client.rs
@@ -525,7 +525,7 @@ impl MetaClient {
         self.heartbeat_client()?.ask_leader().await
     }
 
-    /// Returns a heartbeat bidirectional streaming: (sender, recever), the
+    /// Returns a heartbeat bidirectional streaming: (sender, receiver), the
     /// other end is the leader of `metasrv`.
     ///
     /// The `datanode` needs to use the sender to continuously send heartbeat
diff --git a/src/meta-srv/src/election/rds/mysql.rs b/src/meta-srv/src/election/rds/mysql.rs
index e36e1bfacd..d9a9405cad 100644
--- a/src/meta-srv/src/election/rds/mysql.rs
+++ b/src/meta-srv/src/election/rds/mysql.rs
@@ -1190,7 +1190,7 @@ mod tests {
             ));
             handles.push(handle);
         }
-        // Wait for candidates to registrate themselves and renew their leases at least once.
+        // Wait for candidates to register themselves and renew their leases at least once.
         tokio::time::sleep(candidate_lease_ttl / 2 + Duration::from_secs(1)).await;
 
         let (tx, _) = broadcast::channel(100);
diff --git a/src/meta-srv/src/election/rds/postgres.rs b/src/meta-srv/src/election/rds/postgres.rs
index 4d50fe0867..e43bd4424e 100644
--- a/src/meta-srv/src/election/rds/postgres.rs
+++ b/src/meta-srv/src/election/rds/postgres.rs
@@ -1012,7 +1012,7 @@ mod tests {
            ));
            handles.push(handle);
        }
-        // Wait for candidates to registrate themselves and renew their leases at least once.
+        // Wait for candidates to register themselves and renew their leases at least once.
        tokio::time::sleep(Duration::from_secs(3)).await;
 
        let (tx, _) = broadcast::channel(100);
diff --git a/src/mito2/src/manifest/manager.rs b/src/mito2/src/manifest/manager.rs
index 29a24abba6..7bb64071c5 100644
--- a/src/mito2/src/manifest/manager.rs
+++ b/src/mito2/src/manifest/manager.rs
@@ -76,7 +76,7 @@ pub struct RegionManifestOptions {
 ///     -RegionMetadataRef metadata
 /// }
 /// class RegionEdit {
-///     -VersionNumber regoin_version
+///     -VersionNumber region_version
 ///     -Vec~FileMeta~ files_to_add
 ///     -Vec~FileMeta~ files_to_remove
 ///     -SequenceNumber flushed_sequence
diff --git a/src/promql/src/extension_plan/scalar_calculate.rs b/src/promql/src/extension_plan/scalar_calculate.rs
index 86363cc154..e29aadebab 100644
--- a/src/promql/src/extension_plan/scalar_calculate.rs
+++ b/src/promql/src/extension_plan/scalar_calculate.rs
@@ -70,7 +70,7 @@ impl ScalarCalculate {
         interval: Millisecond,
         input: LogicalPlan,
         time_index: &str,
-        tag_colunms: &[String],
+        tag_columns: &[String],
         field_column: &str,
         table_name: Option<&str>,
     ) -> Result<Self> {
@@ -97,7 +97,7 @@ impl ScalarCalculate {
             end,
             interval,
             time_index: time_index.to_string(),
-            tag_columns: tag_colunms.to_vec(),
+            tag_columns: tag_columns.to_vec(),
             field_column: field_column.to_string(),
             input,
             output_schema: Arc::new(schema),
diff --git a/src/query/src/dist_plan/planner.rs b/src/query/src/dist_plan/planner.rs
index 1d79cc204a..a7cc939993 100644
--- a/src/query/src/dist_plan/planner.rs
+++ b/src/query/src/dist_plan/planner.rs
@@ -82,7 +82,7 @@ impl ExtensionPlanner for MergeSortExtensionPlanner {
             // and we only need to do a merge sort, otherwise fallback to quick sort
             let can_merge_sort = partition_cnt >= region_cnt;
             if can_merge_sort {
-                // TODO(discord9): use `SortPreversingMergeExec here`
+                // TODO(discord9): use `SortPreservingMergeExec here`
             }
             // for now merge sort only exist in logical plan, and have the same effect as `Sort`
             // doesn't change the execution plan, this will change in the future
diff --git a/src/servers/src/http/event.rs b/src/servers/src/http/event.rs
index ad649c872f..cc5cbb9935 100644
--- a/src/servers/src/http/event.rs
+++ b/src/servers/src/http/event.rs
@@ -352,7 +352,7 @@ async fn dryrun_pipeline_inner(
     )
     .await?;
 
-    let colume_type_key = "colume_type";
+    let column_type_key = "column_type";
     let data_type_key = "data_type";
     let name_key = "name";
 
@@ -376,7 +376,7 @@ async fn dryrun_pipeline_inner(
                 JsonValue::String(cs.datatype().as_str_name().to_string()),
             );
             map.insert(
-                colume_type_key.to_string(),
+                column_type_key.to_string(),
                 JsonValue::String(cs.semantic_type().as_str_name().to_string()),
             );
             map.insert(
@@ -409,7 +409,7 @@
             );
             map.insert(
                 "semantic_type".to_string(),
-                schema[idx][colume_type_key].clone(),
+                schema[idx][column_type_key].clone(),
             );
             map.insert(
                 "data_type".to_string(),
diff --git a/src/sql/src/statements/truncate.rs b/src/sql/src/statements/truncate.rs
index a4e4c49d5b..b9c299601c 100644
--- a/src/sql/src/statements/truncate.rs
+++ b/src/sql/src/statements/truncate.rs
@@ -105,7 +105,7 @@ mod tests {
     use crate::statements::statement::Statement;
 
     #[test]
-    fn test_display_for_tuncate_table() {
+    fn test_display_for_truncate_table() {
        let sql = r"truncate table t1;";
        let stmts: Vec<Statement> =
            ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index ef072b477b..f52d0d20a7 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -3212,37 +3212,37 @@ transform:
 
     let dryrun_schema = json!([
        {
-            "colume_type": "FIELD",
+            "column_type": "FIELD",
            "data_type": "INT32",
            "fulltext": false,
            "name": "id1"
        },
        {
-            "colume_type": "FIELD",
+            "column_type": "FIELD",
            "data_type": "INT32",
            "fulltext": false,
            "name": "id2"
        },
        {
-            "colume_type": "FIELD",
+            "column_type": "FIELD",
            "data_type": "STRING",
            "fulltext": false,
            "name": "type"
        },
        {
-            "colume_type": "FIELD",
+            "column_type": "FIELD",
            "data_type": "STRING",
            "fulltext": false,
            "name": "log"
        },
        {
-            "colume_type": "FIELD",
+            "column_type": "FIELD",
            "data_type": "STRING",
            "fulltext": false,
            "name": "logger"
        },
        {
-            "colume_type": "TIMESTAMP",
+            "column_type": "TIMESTAMP",
            "data_type": "TIMESTAMP_NANOSECOND",
            "fulltext": false,
            "name": "time"