From b6fa316c65546a0c771de0850aff53a0d5b7417a Mon Sep 17 00:00:00 2001
From: Dongxu Wang
Date: Mon, 21 Nov 2022 14:07:45 +0800
Subject: [PATCH] chore: correct typos (#589) (#592)

---
 docs/how-to/how-to-write-aggregate-function.md | 4 ++--
 src/api/greptime/v1/meta/heartbeat.proto | 2 +-
 src/catalog/src/system.rs | 2 +-
 src/client/examples/insert.rs | 6 +++---
 src/client/src/database.rs | 2 +-
 src/common/grpc/src/lib.rs | 2 +-
 src/common/grpc/src/physical.rs | 2 +-
 src/common/grpc/src/physical/plan.rs | 14 +++++++-------
 src/common/query/src/logical_plan/udf.rs | 2 +-
 src/common/query/src/signature.rs | 8 ++++----
 src/common/substrait/src/df_logical.rs | 10 +++++-----
 src/common/substrait/src/error.rs | 2 +-
 src/common/substrait/src/types.rs | 4 ++--
 src/datanode/src/server/grpc/ddl.rs | 2 +-
 src/datanode/src/server/grpc/plan.rs | 2 +-
 src/datatypes/src/schema.rs | 2 +-
 .../src/vectors/operations/find_unique.rs | 2 +-
 src/frontend/src/spliter.rs | 16 ++++++++--------
 src/frontend/src/table.rs | 2 +-
 src/meta-client/src/rpc/router.rs | 2 +-
 src/meta-srv/src/service/heartbeat.rs | 2 +-
 src/script/src/engine.rs | 2 +-
 src/script/src/python/builtins/mod.rs | 8 ++++----
 src/script/src/python/builtins/test.rs | 2 +-
 src/script/src/python/coprocessor.rs | 8 ++++----
 src/script/src/python/coprocessor/compile.rs | 6 +++---
 src/script/src/python/coprocessor/parse.rs | 10 +++++-----
 src/script/src/python/error.rs | 2 +-
 src/script/src/python/vector.rs | 10 +++++-----
 src/servers/src/http.rs | 2 +-
 src/servers/src/server.rs | 8 ++++----
 src/sql/src/parser.rs | 2 +-
 src/storage/src/region.rs | 2 +-
 src/storage/src/region/writer.rs | 2 +-
 src/storage/src/schema/projected.rs | 2 +-
 35 files changed, 78 insertions(+), 78 deletions(-)

diff --git a/docs/how-to/how-to-write-aggregate-function.md b/docs/how-to/how-to-write-aggregate-function.md
index 15624353cd..519a78d4e9 100644
--- a/docs/how-to/how-to-write-aggregate-function.md
+++ b/docs/how-to/how-to-write-aggregate-function.md
@@ -55,7 +55,7 @@ The DataFusion basically execute aggregate like this:
 2. Call `update_batch` on each accumulator with partitioned data, to let you update your aggregate calculation.
 3. Call `state` to get each accumulator's internal state, the medial calculation result.
 4. Call `merge_batch` to merge all accumulator's internal state to one.
-5. Execute `evalute` on the chosen one to get the final calculation result.
+5. Execute `evaluate` on the chosen one to get the final calculation result.

 Once you know the meaning of each method, you can easily write your accumulator.
 You can refer to `Median` accumulator or `SUM` accumulator defined in file `my_sum_udaf_example.rs` for more details.
@@ -63,7 +63,7 @@ Once you know the meaning of each method, you can easily write your accumulator.
 You can call `register_aggregate_function` method in query engine to register your aggregate function. To do that, you have to new an instance of struct `AggregateFunctionMeta`.

 The struct has three fields, first is the name of your aggregate function's name. The function name is case-sensitive due to DataFusion's restriction. We strongly recommend using lowercase for your name. If you have to use uppercase name, wrap your aggregate function with quotation marks. For example, if you define an aggregate function named "my_aggr", you can use "`SELECT MY_AGGR(x)`"; if you define "my_AGGR", you have to use "`SELECT "my_AGGR"(x)`".
-The second field is arg_counts ,the count of the arguments. Like accumulator `percentile`, caculating the p_number of the column. We need to input the value of column and the value of p to cacalate, and so the count of the arguments is two.
+The second field is arg_counts ,the count of the arguments. Like accumulator `percentile`, calculating the p_number of the column. We need to input the value of column and the value of p to cacalate, and so the count of the arguments is two.
 The third field is a function about how to create your accumulator creator that you defined in step 1 above. Create creator, that's a bit intertwined, but it is how we make DataFusion use a newly created aggregate function each time it executes a SQL, preventing the stored input types from affecting each other.
 The key detail can be starting looking at our `DfContextProviderAdapter` struct's `get_aggregate_meta` method.

diff --git a/src/api/greptime/v1/meta/heartbeat.proto b/src/api/greptime/v1/meta/heartbeat.proto
index 4cc89bc943..b4654efc6d 100644
--- a/src/api/greptime/v1/meta/heartbeat.proto
+++ b/src/api/greptime/v1/meta/heartbeat.proto
@@ -39,7 +39,7 @@ message NodeStat {
   uint64 wcus = 2;
   // Table number in this node
   uint64 table_num = 3;
-  // Regon number in this node
+  // Region number in this node
   uint64 region_num = 4;

   double cpu_usage = 5;
diff --git a/src/catalog/src/system.rs b/src/catalog/src/system.rs
index a55f0d06f7..07084248f2 100644
--- a/src/catalog/src/system.rs
+++ b/src/catalog/src/system.rs
@@ -383,7 +383,7 @@ mod tests {
     use super::*;

     #[test]
-    pub fn test_decode_catalog_enrty() {
+    pub fn test_decode_catalog_entry() {
         let entry = decode_system_catalog(
             Some(EntryType::Catalog as u8),
             Some("some_catalog".as_bytes()),
diff --git a/src/client/examples/insert.rs b/src/client/examples/insert.rs
index fa31d0c01e..e85d45200c 100644
--- a/src/client/examples/insert.rs
+++ b/src/client/examples/insert.rs
@@ -43,7 +43,7 @@ async fn run() {

 fn insert_batches() -> Vec> {
     const SEMANTIC_TAG: i32 = 0;
-    const SEMANTIC_FEILD: i32 = 1;
+    const SEMANTIC_FIELD: i32 = 1;
     const SEMANTIC_TS: i32 = 2;

     let row_count = 4;
@@ -71,7 +71,7 @@ fn insert_batches() -> Vec> {
     };
     let cpu_column = Column {
         column_name: "cpu".to_string(),
-        semantic_type: SEMANTIC_FEILD,
+        semantic_type: SEMANTIC_FIELD,
         values: Some(cpu_vals),
         null_mask: vec![2],
         ..Default::default()
@@ -83,7 +83,7 @@ fn insert_batches() -> Vec> {
     };
     let mem_column = Column {
         column_name: "memory".to_string(),
-        semantic_type: SEMANTIC_FEILD,
+        semantic_type: SEMANTIC_FIELD,
         values: Some(mem_vals),
         null_mask: vec![4],
         ..Default::default()
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index 3415acd2ec..9cea7d5d85 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -22,7 +22,7 @@ use api::v1::{
     SelectExpr,
 };
 use common_error::status_code::StatusCode;
-use common_grpc::{AsExcutionPlan, DefaultAsPlanImpl};
+use common_grpc::{AsExecutionPlan, DefaultAsPlanImpl};
 use common_insert::column_to_vector;
 use common_query::Output;
 use common_recordbatch::{RecordBatch, RecordBatches};
diff --git a/src/common/grpc/src/lib.rs b/src/common/grpc/src/lib.rs
index 8444de9086..a3d95e0805 100644
--- a/src/common/grpc/src/lib.rs
+++ b/src/common/grpc/src/lib.rs
@@ -20,4 +20,4 @@ pub mod writer;

 pub use error::Error;
 pub use physical::plan::{DefaultAsPlanImpl, MockExecution};
-pub use physical::AsExcutionPlan;
+pub use physical::AsExecutionPlan;
diff --git a/src/common/grpc/src/physical.rs b/src/common/grpc/src/physical.rs
index 13778a8634..40ce20bef6 100644
--- a/src/common/grpc/src/physical.rs
+++ b/src/common/grpc/src/physical.rs
@@ -22,7 +22,7 @@ use datafusion::physical_plan::ExecutionPlan;

 pub type ExecutionPlanRef = Arc;

-pub trait AsExcutionPlan {
+pub trait AsExecutionPlan {
     type Error: std::error::Error;

     fn try_into_physical_plan(&self) -> Result;
diff --git a/src/common/grpc/src/physical/plan.rs b/src/common/grpc/src/physical/plan.rs
index 019b11ac2d..798f31b452 100644
--- a/src/common/grpc/src/physical/plan.rs
+++ b/src/common/grpc/src/physical/plan.rs
@@ -35,13 +35,13 @@ use crate::error::{
     DecodePhysicalPlanNodeSnafu, EmptyPhysicalPlanSnafu, Error, MissingFieldSnafu,
     NewProjectionSnafu, UnsupportedDfPlanSnafu,
 };
-use crate::physical::{expr, AsExcutionPlan, ExecutionPlanRef};
+use crate::physical::{expr, AsExecutionPlan, ExecutionPlanRef};

 pub struct DefaultAsPlanImpl {
     pub bytes: Vec,
 }

-impl AsExcutionPlan for DefaultAsPlanImpl {
+impl AsExecutionPlan for DefaultAsPlanImpl {
     type Error = Error;

     // Vec -> PhysicalPlanNode -> ExecutionPlanRef
@@ -64,7 +64,7 @@ impl AsExcutionPlan for DefaultAsPlanImpl {
     }
 }

-impl AsExcutionPlan for PhysicalPlanNode {
+impl AsExecutionPlan for PhysicalPlanNode {
     type Error = Error;

     fn try_into_physical_plan(&self) -> Result {
@@ -227,7 +227,7 @@ mod tests {
     use datafusion::physical_plan::projection::ProjectionExec;

     use crate::physical::plan::{DefaultAsPlanImpl, MockExecution};
-    use crate::physical::{AsExcutionPlan, ExecutionPlanRef};
+    use crate::physical::{AsExecutionPlan, ExecutionPlanRef};

     #[test]
     fn test_convert_df_projection_with_bytes() {
@@ -236,7 +236,7 @@ mod tests {

         let bytes = DefaultAsPlanImpl::try_from_physical_plan(projection_exec).unwrap();
         let exec = bytes.try_into_physical_plan().unwrap();
-        verify_df_porjection(exec);
+        verify_df_projection(exec);
     }

     #[test]
@@ -246,7 +246,7 @@ mod tests {

         let projection_node = PhysicalPlanNode::try_from_physical_plan(projection_exec).unwrap();
         let exec = projection_node.try_into_physical_plan().unwrap();
-        verify_df_porjection(exec);
+        verify_df_projection(exec);
     }

     fn mock_df_projection() -> Arc {
@@ -264,7 +264,7 @@ mod tests {
         )
     }

-    fn verify_df_porjection(exec: ExecutionPlanRef) {
+    fn verify_df_projection(exec: ExecutionPlanRef) {
         let projection_exec = exec.as_any().downcast_ref::().unwrap();
         let mock_input = projection_exec
             .input()
diff --git a/src/common/query/src/logical_plan/udf.rs b/src/common/query/src/logical_plan/udf.rs
index b16fb50254..61a3653f81 100644
--- a/src/common/query/src/logical_plan/udf.rs
+++ b/src/common/query/src/logical_plan/udf.rs
@@ -13,7 +13,7 @@
 // limitations under the License.

 //! Udf module contains foundational types that are used to represent UDFs.
-//! It's modifed from datafusion.
+//! It's modified from datafusion.
 use std::fmt;
 use std::fmt::{Debug, Formatter};
 use std::sync::Arc;
diff --git a/src/common/query/src/signature.rs b/src/common/query/src/signature.rs
index 332ddbd83a..c8d4963b6e 100644
--- a/src/common/query/src/signature.rs
+++ b/src/common/query/src/signature.rs
@@ -53,7 +53,7 @@ pub struct Signature {
 }

 #[inline]
-fn concret_types_to_arrow_types(ts: Vec) -> Vec {
+fn concrete_types_to_arrow_types(ts: Vec) -> Vec {
     ts.iter().map(ConcreteDataType::as_arrow_type).collect()
 }

@@ -118,14 +118,14 @@ impl From for DfTypeSignature {
     fn from(type_signature: TypeSignature) -> DfTypeSignature {
         match type_signature {
             TypeSignature::Variadic(types) => {
-                DfTypeSignature::Variadic(concret_types_to_arrow_types(types))
+                DfTypeSignature::Variadic(concrete_types_to_arrow_types(types))
             }
             TypeSignature::VariadicEqual => DfTypeSignature::VariadicEqual,
             TypeSignature::Uniform(n, types) => {
-                DfTypeSignature::Uniform(n, concret_types_to_arrow_types(types))
+                DfTypeSignature::Uniform(n, concrete_types_to_arrow_types(types))
             }
             TypeSignature::Exact(types) => {
-                DfTypeSignature::Exact(concret_types_to_arrow_types(types))
+                DfTypeSignature::Exact(concrete_types_to_arrow_types(types))
             }
             TypeSignature::Any(n) => DfTypeSignature::Any(n),
             TypeSignature::OneOf(ts) => {
diff --git a/src/common/substrait/src/df_logical.rs b/src/common/substrait/src/df_logical.rs
index f054be910a..6f0573144c 100644
--- a/src/common/substrait/src/df_logical.rs
+++ b/src/common/substrait/src/df_logical.rs
@@ -178,14 +178,14 @@ impl DFLogicalSubstraitConvertor {
             })?;

         let adapter = Arc::new(DfTableProviderAdapter::new(table_ref));
-        // Get schema directly from the table, and compare it with the schema retrived from substrait proto.
+        // Get schema directly from the table, and compare it with the schema retrieved from substrait proto.
         let stored_schema = adapter.schema();
-        let retrived_schema = to_schema(read_rel.base_schema.unwrap_or_default())?;
-        let retrived_arrow_schema = retrived_schema.arrow_schema();
+        let retrieved_schema = to_schema(read_rel.base_schema.unwrap_or_default())?;
+        let retrieved_arrow_schema = retrieved_schema.arrow_schema();
         ensure!(
-            stored_schema.fields == retrived_arrow_schema.fields,
+            stored_schema.fields == retrieved_arrow_schema.fields,
             SchemaNotMatchSnafu {
-                substrait_schema: retrived_arrow_schema.clone(),
+                substrait_schema: retrieved_arrow_schema.clone(),
                 storage_schema: stored_schema
             }
         );
diff --git a/src/common/substrait/src/error.rs b/src/common/substrait/src/error.rs
index 16e6bec1aa..74e2112a91 100644
--- a/src/common/substrait/src/error.rs
+++ b/src/common/substrait/src/error.rs
@@ -81,7 +81,7 @@ pub enum Error {
         source: BoxedError,
     },

-    #[snafu(display("Table quering not found: {}", name))]
+    #[snafu(display("Table querying not found: {}", name))]
     TableNotFound { name: String, backtrace: Backtrace },

     #[snafu(display("Cannot convert plan doesn't belong to GreptimeDB"))]
diff --git a/src/common/substrait/src/types.rs b/src/common/substrait/src/types.rs
index 1cd563c3ae..fd4cc34fbe 100644
--- a/src/common/substrait/src/types.rs
+++ b/src/common/substrait/src/types.rs
@@ -12,10 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-//! Methods that perform convertion between Substrait's type ([Type](SType)) and GreptimeDB's type ([ConcreteDataType]).
+//! Methods that perform conversion between Substrait's type ([Type](SType)) and GreptimeDB's type ([ConcreteDataType]).
 //!
 //! Substrait use [type variation](https://substrait.io/types/type_variations/) to express different "logical types".
-//! Current we only have variations on integer types. Variation 0 (system prefered) are the same with base types, which
+//! Current we only have variations on integer types. Variation 0 (system preferred) are the same with base types, which
 //! are signed integer (i.e. I8 -> [i8]), and Variation 1 stands for unsigned integer (i.e. I8 -> [u8]).

 use datatypes::prelude::ConcreteDataType;
diff --git a/src/datanode/src/server/grpc/ddl.rs b/src/datanode/src/server/grpc/ddl.rs
index 90dc0e4a2b..1bb2c5d7f9 100644
--- a/src/datanode/src/server/grpc/ddl.rs
+++ b/src/datanode/src/server/grpc/ddl.rs
@@ -339,7 +339,7 @@ mod tests {
         assert_eq!(column_schema.data_type, ConcreteDataType::string_datatype());
         assert!(column_schema.is_nullable());

-        let default_constraint = ColumnDefaultConstraint::Value(Value::from("defaut value"));
+        let default_constraint = ColumnDefaultConstraint::Value(Value::from("default value"));
         let column_def = ColumnDef {
             name: "a".to_string(),
             datatype: 12, // string
diff --git a/src/datanode/src/server/grpc/plan.rs b/src/datanode/src/server/grpc/plan.rs
index 48974d1075..5c228852f2 100644
--- a/src/datanode/src/server/grpc/plan.rs
+++ b/src/datanode/src/server/grpc/plan.rs
@@ -14,7 +14,7 @@

 use std::sync::Arc;

-use common_grpc::{AsExcutionPlan, DefaultAsPlanImpl};
+use common_grpc::{AsExecutionPlan, DefaultAsPlanImpl};
 use common_query::physical_plan::{PhysicalPlanAdapter, PhysicalPlanRef};
 use common_query::Output;
 use datatypes::schema::Schema;
diff --git a/src/datatypes/src/schema.rs b/src/datatypes/src/schema.rs
index cb3df93896..a1792fd665 100644
--- a/src/datatypes/src/schema.rs
+++ b/src/datatypes/src/schema.rs
@@ -156,7 +156,7 @@ impl Schema {
     /// Create a schema from a vector of [ColumnSchema].
     ///
     /// # Panics
-    /// Panics when ColumnSchema's `default_constrait` can't be serialized into json.
+    /// Panics when ColumnSchema's `default_constraint` can't be serialized into json.
     pub fn new(column_schemas: Vec) -> Schema {
         // Builder won't fail in this case
         SchemaBuilder::try_from(column_schemas)
diff --git a/src/datatypes/src/vectors/operations/find_unique.rs b/src/datatypes/src/vectors/operations/find_unique.rs
index 3aade173ac..6041a9d9fb 100644
--- a/src/datatypes/src/vectors/operations/find_unique.rs
+++ b/src/datatypes/src/vectors/operations/find_unique.rs
@@ -44,7 +44,7 @@ pub(crate) fn find_unique_scalar<'a, T: ScalarVector>(
         }
     }

-    // Marks first element as selcted if it is different from previous element, otherwise
+    // Marks first element as selected if it is different from previous element, otherwise
     // keep selected bitmap unchanged.
     let is_first_not_duplicate = prev_vector
         .map(|pv| {
diff --git a/src/frontend/src/spliter.rs b/src/frontend/src/spliter.rs
index 06fc9eca8e..eb87907651 100644
--- a/src/frontend/src/spliter.rs
+++ b/src/frontend/src/spliter.rs
@@ -32,7 +32,7 @@ pub struct WriteSpliter {
 }

 impl WriteSpliter {
-    pub fn with_patition_rule(rule: PartitionRuleRef) -> Self {
+    pub fn with_partition_rule(rule: PartitionRuleRef) -> Self {
         Self {
             partition_rule: rule,
         }
@@ -204,7 +204,7 @@ mod tests {
     fn test_writer_spliter() {
         let insert = mock_insert_request();
         let rule = Arc::new(MockPartitionRule) as PartitionRuleRef;
-        let spliter = WriteSpliter::with_patition_rule(rule);
+        let spliter = WriteSpliter::with_partition_rule(rule);
         let ret = spliter.split(insert).unwrap();

         assert_eq!(2, ret.len());
@@ -354,16 +354,16 @@ mod tests {
         let vectors = vec![v1, v2];

         let row_0_vals = partition_values(&vectors, 0);
-        let expeted: Vec = vec![true.into(), "host1".into()];
-        assert_eq!(expeted, row_0_vals);
+        let expected: Vec = vec![true.into(), "host1".into()];
+        assert_eq!(expected, row_0_vals);

         let row_1_vals = partition_values(&vectors, 1);
-        let expeted: Vec = vec![false.into(), Value::Null];
-        assert_eq!(expeted, row_1_vals);
+        let expected: Vec = vec![false.into(), Value::Null];
+        assert_eq!(expected, row_1_vals);

         let row_2_vals = partition_values(&vectors, 2);
-        let expeted: Vec = vec![true.into(), "host3".into()];
-        assert_eq!(expeted, row_2_vals);
+        let expected: Vec = vec![true.into(), "host3".into()];
+        assert_eq!(expected, row_2_vals);
     }

     fn mock_insert_request() -> InsertRequest {
diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs
index ea4d70e387..0c07fd14f7 100644
--- a/src/frontend/src/table.rs
+++ b/src/frontend/src/table.rs
@@ -81,7 +81,7 @@ impl Table for DistTable {
     async fn insert(&self, request: InsertRequest) -> table::Result {
         let partition_rule = self.find_partition_rule().await.map_err(TableError::new)?;

-        let spliter = WriteSpliter::with_patition_rule(partition_rule);
+        let spliter = WriteSpliter::with_partition_rule(partition_rule);
         let inserts = spliter.split(request).map_err(TableError::new)?;
         let result = match self.dist_insert(inserts).await.map_err(TableError::new)? {
             client::ObjectResult::Select(_) => unreachable!(),
diff --git a/src/meta-client/src/rpc/router.rs b/src/meta-client/src/rpc/router.rs
index 03ceeb285f..361b2fe788 100644
--- a/src/meta-client/src/rpc/router.rs
+++ b/src/meta-client/src/rpc/router.rs
@@ -186,7 +186,7 @@ impl TryFrom for Table {
         let table_name = t
             .table_name
             .context(error::RouteInfoCorruptedSnafu {
-                err_msg: "table name requied",
+                err_msg: "table name required",
             })?
             .into();
         Ok(Self {
diff --git a/src/meta-srv/src/service/heartbeat.rs b/src/meta-srv/src/service/heartbeat.rs
index a71f9cd63f..e3882478f4 100644
--- a/src/meta-srv/src/service/heartbeat.rs
+++ b/src/meta-srv/src/service/heartbeat.rs
@@ -85,7 +85,7 @@ impl heartbeat_server::Heartbeat for MetaSrv {

                     match tx.send(Err(err)).await {
                         Ok(_) => (),
-                        Err(_err) => break, // response was droped
+                        Err(_err) => break, // response was dropped
                     }
                 }
             }
diff --git a/src/script/src/engine.rs b/src/script/src/engine.rs
index c9cbd9764a..8740dd0dcc 100644
--- a/src/script/src/engine.rs
+++ b/src/script/src/engine.rs
@@ -51,7 +51,7 @@ pub trait ScriptEngine {
     ) -> std::result::Result;
 }

-/// Evalute script context
+/// Evaluate script context
 #[derive(Debug, Default)]
 pub struct EvalContext {}

diff --git a/src/script/src/python/builtins/mod.rs b/src/script/src/python/builtins/mod.rs
index d6ebc5856f..5bee6e5577 100644
--- a/src/script/src/python/builtins/mod.rs
+++ b/src/script/src/python/builtins/mod.rs
@@ -235,7 +235,7 @@ macro_rules! bind_call_unary_math_function {

 /// The macro for binding function in `datafusion_physical_expr::expressions`(most of them are aggregate function)
 ///
-/// - first arguements is the name of datafusion expression function like `Avg`
+/// - first arguments is the name of datafusion expression function like `Avg`
 /// - second is the python virtual machine ident `vm`
 /// - following is the actual args passing in(as a slice).i.e.`&[values.to_arrow_array()]`
 /// - the data type of passing in args, i.e: `Datatype::Float64`
@@ -259,7 +259,7 @@ fn from_df_err(err: DataFusionError, vm: &VirtualMachine) -> PyBaseExceptionRef
     vm.new_runtime_error(format!("Data Fusion Error: {err:#?}"))
 }

-/// evalute Aggregate Expr using its backing accumulator
+/// evaluate Aggregate Expr using its backing accumulator
 fn eval_aggr_fn(
     aggr: T,
     values: &[ArrayRef],
@@ -1120,7 +1120,7 @@ pub(crate) mod greptime_builtin {
                 State::Num(v) => {
                     if cur_idx + 1 > parsed.len() {
                         return Err(vm.new_runtime_error(
-                            "Expect a spearator after number, found nothing!".to_string(),
+                            "Expect a separator after number, found nothing!".to_string(),
                         ));
                     }
                     let nxt = &parsed[cur_idx + 1];
@@ -1128,7 +1128,7 @@ pub(crate) mod greptime_builtin {
                         tot_time += v * factor(sep, vm)?;
                     } else {
                         return Err(vm.new_runtime_error(format!(
-                            "Expect a spearator after number, found `{nxt:#?}`"
+                            "Expect a separator after number, found `{nxt:#?}`"
                         )));
                     }
                     cur_idx += 2;
diff --git a/src/script/src/python/builtins/test.rs b/src/script/src/python/builtins/test.rs
index 2f008611da..43ad8d4f5d 100644
--- a/src/script/src/python/builtins/test.rs
+++ b/src/script/src/python/builtins/test.rs
@@ -118,7 +118,7 @@ struct Var {
     ty: DataType,
 }

-/// for floating number comparsion
+/// for floating number comparison
 const EPS: f64 = 2.0 * f64::EPSILON;

 /// Null element just not supported for now for simplicity with writing test cases
diff --git a/src/script/src/python/coprocessor.rs b/src/script/src/python/coprocessor.rs
index 8dad524adf..214deaa24b 100644
--- a/src/script/src/python/coprocessor.rs
+++ b/src/script/src/python/coprocessor.rs
@@ -49,7 +49,7 @@ use crate::python::PyVector;
 #[cfg_attr(test, derive(Deserialize))]
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct AnnotationInfo {
-    /// if None, use types infered by PyVector
+    /// if None, use types inferred by PyVector
     pub datatype: Option,
     pub is_nullable: bool,
 }
@@ -115,7 +115,7 @@ impl Coprocessor {
                 datatype: ty,
                 is_nullable,
             } = anno[idx].to_owned().unwrap_or_else(||
-                // default to be not nullable and use DataType infered by PyVector itself
+                // default to be not nullable and use DataType inferred by PyVector itself
                 AnnotationInfo{
                     datatype: Some(real_ty.to_owned()),
                     is_nullable: false
@@ -208,7 +208,7 @@ fn try_into_py_vector(fetch_args: Vec) -> Result> {
             }
             _ => {
                 return ret_other_error_with(format!(
-                    "Unsupport data type at column {idx}: {:?} for coprocessor",
+                    "Unsupported data type at column {idx}: {:?} for coprocessor",
                     arg.data_type()
                 ))
                 .fail()
@@ -348,7 +348,7 @@ fn set_items_in_scope(
 /// ```
 ///
 /// # Type Annotation
-/// you can use type annotations in args and returns to designate types, so coprocessor will check for corrsponding types.
+/// you can use type annotations in args and returns to designate types, so coprocessor will check for corresponding types.
 ///
 /// Currently support types are `u8`, `u16`, `u32`, `u64`, `i8`, `i16`, `i32`, `i64` and `f16`, `f32`, `f64`
 ///
diff --git a/src/script/src/python/coprocessor/compile.rs b/src/script/src/python/coprocessor/compile.rs
index 5bef63dc23..f1321d1a0e 100644
--- a/src/script/src/python/coprocessor/compile.rs
+++ b/src/script/src/python/coprocessor/compile.rs
@@ -67,7 +67,7 @@ fn gen_call(name: &str, deco_args: &DecoratorArgs, loc: &Location) -> ast::Stmt<

 /// stripe the decorator(`@xxxx`) and type annotation(for type checker is done in rust function), add one line in the ast for call function with given parameter, and compiler into `CodeObject`
 ///
-/// The rationale is that rustpython's vm is not very efficient according to [offical benchmark](https://rustpython.github.io/benchmarks),
+/// The rationale is that rustpython's vm is not very efficient according to [official benchmark](https://rustpython.github.io/benchmarks),
 /// So we should avoid running too much Python Bytecode, hence in this function we delete `@` decorator(instead of actually write a decorator in python)
 /// And add a function call in the end and also
 /// strip type annotation
@@ -108,8 +108,8 @@ pub fn compile_script(name: &str, deco_args: &DecoratorArgs, script: &str) -> Re
         }
         loc = Some(stmt.location);

-        // This manually construct ast has no corrsponding code
-        // in the script, so just give it a location that don't exist in orginal script
+        // This manually construct ast has no corresponding code
+        // in the script, so just give it a location that don't exist in original script
         // (which doesn't matter because Location usually only used in pretty print errors)
     }
     // Append statement which calling coprocessor function.
diff --git a/src/script/src/python/coprocessor/parse.rs b/src/script/src/python/coprocessor/parse.rs
index 51423c9bc5..50bb0e3264 100644
--- a/src/script/src/python/coprocessor/parse.rs
+++ b/src/script/src/python/coprocessor/parse.rs
@@ -286,7 +286,7 @@ fn parse_keywords(keywords: &Vec>) -> Result {
                 let s = s.as_str();
                 if visited_key.contains(s) {
                     return fail_parse_error!(
-                        format!("`{s}` occur multiple times in decorator's arguements' list."),
+                        format!("`{s}` occur multiple times in decorator's arguments' list."),
                         Some(kw.location),
                     );
                 }
@@ -308,7 +308,7 @@ fn parse_keywords(keywords: &Vec>) -> Result {
         None => {
             return fail_parse_error!(
                 format!(
-                    "Expect explictly set both `args` and `returns`, found \n{:#?}",
+                    "Expect explicitly set both `args` and `returns`, found \n{:#?}",
                     &kw.node
                 ),
                 Some(kw.location),
@@ -365,14 +365,14 @@ fn parse_decorator(decorator: &ast::Expr<()>) -> Result {
     }
 }

-// get type annotaion in arguments
+// get type annotation in arguments
 fn get_arg_annotations(args: &Arguments) -> Result>> {
     // get arg types from type annotation>
     args.args
         .iter()
         .map(|arg| {
             if let Some(anno) = &arg.node.annotation {
-                // for there is erro handling for parse_annotation
+                // for there is error handling for parse_annotation
                 parse_annotation(anno).map(Some)
             } else {
                 Ok(None)
@@ -472,7 +472,7 @@ pub fn parse_and_compile_copr(script: &str) -> Result {
             .collect()
     };

-    // make sure both arguments&returns in fucntion
+    // make sure both arguments&returns in function
     // and in decorator have same length
     ensure!(
         deco_args.arg_names.len() == arg_types.len(),
diff --git a/src/script/src/python/error.rs b/src/script/src/python/error.rs
index 4fe9a717bf..57499befaf 100644
--- a/src/script/src/python/error.rs
+++ b/src/script/src/python/error.rs
@@ -140,7 +140,7 @@ impl ErrorExt for Error {
         self
     }
 }
-// impl from for those error so one can use question mark and implictly cast into `CoprError`
+// impl from for those error so one can use question mark and implicitly cast into `CoprError`
 impl From for Error {
     fn from(e: DataTypeError) -> Self {
         Self::TypeCast { source: e }
diff --git a/src/script/src/python/vector.rs b/src/script/src/python/vector.rs
index 1ca3d45097..2ebc3e0ad4 100644
--- a/src/script/src/python/vector.rs
+++ b/src/script/src/python/vector.rs
@@ -450,7 +450,7 @@ impl PyVector {
     #[pymethod(magic)]
     fn rfloordiv(&self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult {
         if is_pyobj_scalar(&other, vm) {
-            // FIXME: DataType convert problem, target_type should be infered?
+            // FIXME: DataType convert problem, target_type should be inferred?
             self.scalar_arith_op(other, Some(DataType::Int64), arrow2_rfloordiv_scalar, vm)
         } else {
             self.arith_op(
@@ -482,7 +482,7 @@ impl PyVector {
     // The Comparable Trait only support normal cmp
     // (yes there is a slot_richcompare function, but it is not used in anywhere)
     // so use our own function
-    // TODO(discord9): test those funciton
+    // TODO(discord9): test those function

     #[pymethod(name = "eq")]
     #[pymethod(magic)]
@@ -676,7 +676,7 @@ impl PyVector {
         }
     }

-    /// Unsupport
+    /// Unsupported
     /// TODO(discord9): make it work
     #[allow(unused)]
    fn setitem_by_index(
@@ -689,7 +689,7 @@ impl PyVector {
     }
 }

-/// get corrsponding arrow op function according to given PyComaprsionOp
 ///
 /// TODO(discord9): impl scalar version function
 fn get_arrow_op(op: PyComparisonOp) -> impl Fn(&dyn Array, &dyn Array) -> Box {
@@ -708,7 +708,7 @@ fn get_arrow_op(op: PyComparisonOp) -> impl Fn(&dyn Array, &dyn Array) -> Box
 Result;
 }

-struct AccpetTask {
     // `abort_handle` and `abort_registration` are used in pairs in shutting down the server.
     // They work like sender and receiver for aborting stream. When the server is shutting down,
     // calling `abort_handle.abort()` will "notify" `abort_registration` to stop emitting new
@@ -51,7 +51,7 @@ struct AccpetTask {
     join_handle: Option>,
 }

-impl AccpetTask {
+impl AcceptTask {
     async fn shutdown(&mut self, name: &str) -> Result<()> {
         match self.join_handle.take() {
             Some(join_handle) => {
@@ -118,7 +118,7 @@ impl AccpetTask {

 pub(crate) struct BaseTcpServer {
     name: String,
-    accept_task: Mutex,
+    accept_task: Mutex,
     io_runtime: Arc,
 }

@@ -127,7 +127,7 @@ impl BaseTcpServer {
         let (abort_handle, registration) = AbortHandle::new_pair();
         Self {
             name: name.into(),
-            accept_task: Mutex::new(AccpetTask {
+            accept_task: Mutex::new(AcceptTask {
                 abort_handle,
                 abort_registration: Some(registration),
                 join_handle: None,
diff --git a/src/sql/src/parser.rs b/src/sql/src/parser.rs
index ced4f8d053..6744cb824b 100644
--- a/src/sql/src/parser.rs
+++ b/src/sql/src/parser.rs
@@ -117,7 +117,7 @@ impl<'a> ParserContext<'a> {
     }

     /// Parses SHOW statements
-    /// todo(hl) support `show settings`/`show create`/`show users` ect.
+    /// todo(hl) support `show settings`/`show create`/`show users` etc.
     fn parse_show(&mut self) -> Result {
         if self.consume_token("DATABASES") || self.consume_token("SCHEMAS") {
             self.parse_show_databases()
diff --git a/src/storage/src/region.rs b/src/storage/src/region.rs
index 4a44f94f89..59d7d7e855 100644
--- a/src/storage/src/region.rs
+++ b/src/storage/src/region.rs
@@ -170,7 +170,7 @@ impl RegionImpl {
         RegionImpl { inner }
     }

-    /// Open an exsiting region and recover its data.
+    /// Open an existing region and recover its data.
     ///
     /// The caller should avoid calling this method simultaneously.
     pub async fn open(
diff --git a/src/storage/src/region/writer.rs b/src/storage/src/region/writer.rs
index be4095fefd..c0a8cdc464 100644
--- a/src/storage/src/region/writer.rs
+++ b/src/storage/src/region/writer.rs
@@ -547,7 +547,7 @@ impl WriterInner {
         let flush_req = FlushJob {
             max_memtable_id: max_memtable_id.unwrap(),
             memtables: mem_to_flush,
-            // In write thread, safe to use current commited sequence.
+            // In write thread, safe to use current committed sequence.
             flush_sequence: version_control.committed_sequence(),
             shared: ctx.shared.clone(),
             sst_layer: ctx.sst_layer.clone(),
diff --git a/src/storage/src/schema/projected.rs b/src/storage/src/schema/projected.rs
index ec940a9d34..7f78bf9809 100644
--- a/src/storage/src/schema/projected.rs
+++ b/src/storage/src/schema/projected.rs
@@ -280,7 +280,7 @@ impl BatchOp for ProjectedSchema {
         let indices = self.schema_to_read.row_key_indices();
         for idx in indices {
             let (left_col, right_col) = (left.column(idx), right.column(idx));
-            // Comparision of vector is done by virtual method calls currently. Consider using
+            // Comparison of vector is done by virtual method calls currently. Consider using
             // enum dispatch if this becomes bottleneck.
             let order = left_col.get_ref(i).cmp(&right_col.get_ref(j));
             if order != Ordering::Equal {