diff --git a/Cargo.lock b/Cargo.lock
index bd62da4e7a..a23f72e195 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -233,6 +233,20 @@ version = "0.1.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b3f9eb837c6a783fbf002e3e5cc7925a3aa6893d6d42f9169517528983777590"
 
+[[package]]
+name = "aquamarine"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df752953c49ce90719c7bf1fc587bc8227aed04732ea0c0f85e5397d7fdbd1a1"
+dependencies = [
+ "include_dir",
+ "itertools 0.10.5",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "arc-swap"
 version = "1.6.0"
@@ -4420,6 +4434,25 @@ dependencies = [
  "hashbrown 0.12.3",
 ]
 
+[[package]]
+name = "include_dir"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "18762faeff7122e89e0857b02f7ce6fcc0d101d5e9ad2ad7846cc01d61b7f19e"
+dependencies = [
+ "include_dir_macros",
+]
+
+[[package]]
+name = "include_dir_macros"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f"
+dependencies = [
+ "proc-macro2",
+ "quote",
+]
+
 [[package]]
 name = "indexmap"
 version = "1.9.3"
@@ -5422,6 +5455,7 @@ version = "0.4.0-nightly"
 dependencies = [
  "anymap",
  "api",
+ "aquamarine",
  "async-channel",
  "async-compat",
  "async-stream",
@@ -9194,6 +9228,7 @@ name = "store-api"
 version = "0.4.0-nightly"
 dependencies = [
  "api",
+ "aquamarine",
  "async-stream",
  "async-trait",
  "bytes",
diff --git a/Cargo.toml b/Cargo.toml
index 73375ac4f3..0749939004 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -61,6 +61,7 @@ edition = "2021"
 license = "Apache-2.0"
 
 [workspace.dependencies]
+aquamarine = "0.3"
 arrow = { version = "43.0" }
 etcd-client = "0.11"
 arrow-array = "43.0"
diff --git a/src/auth/src/common.rs b/src/auth/src/common.rs
index d49f4883b7..3dacbcf4db 100644
--- a/src/auth/src/common.rs
+++ b/src/auth/src/common.rs
@@ -26,7 +26,7 @@ use crate::{UserInfoRef, UserProviderRef};
 
 pub(crate) const DEFAULT_USERNAME: &str = "greptime";
 
-/// construct a [`UserInfo`] impl with name
+/// construct a [`UserInfo`](crate::user_info::UserInfo) impl with name
 /// use default username `greptime` if None is provided
 pub fn userinfo_by_name(username: Option<String>) -> UserInfoRef {
     DefaultUserInfo::with_name(username.unwrap_or_else(|| DEFAULT_USERNAME.to_string()))
diff --git a/src/auth/src/user_provider.rs b/src/auth/src/user_provider.rs
index 79a15a4ed6..f940ec94b6 100644
--- a/src/auth/src/user_provider.rs
+++ b/src/auth/src/user_provider.rs
@@ -22,15 +22,15 @@ use crate::UserInfoRef;
 pub trait UserProvider: Send + Sync {
     fn name(&self) -> &str;
 
-    /// [`authenticate`] checks whether a user is valid and allowed to access the database.
+    /// Checks whether a user is valid and allowed to access the database.
     async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef>;
 
-    /// [`authorize`] checks whether a connection request
+    /// Checks whether a connection request
     /// from a certain user to a certain catalog/schema is legal.
-    /// This method should be called after [`authenticate`].
+    /// This method should be called after [authenticate()](UserProvider::authenticate()).
     async fn authorize(&self, catalog: &str, schema: &str, user_info: &UserInfoRef) -> Result<()>;
 
-    /// [`auth`] is a combination of [`authenticate`] and [`authorize`].
+    /// Combination of [authenticate()](UserProvider::authenticate()) and [authorize()](UserProvider::authorize()).
     /// In most cases it's preferred for both convenience and performance.
     async fn auth(
         &self,
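Aside (illustrative, not part of the patch): the trait contract above implies `auth` should behave like `authenticate` followed by `authorize`. A caller-side sketch, assuming this crate's `Identity`, `Password`, `Result`, and `UserInfoRef` types and its `async_trait`-style trait object:

```rust
// Hedged sketch, not the crate's actual code: combine the two steps
// that `UserProvider::auth` is documented to fuse.
async fn check_access(
    provider: &dyn UserProvider,
    id: Identity<'_>,
    password: Password<'_>,
    catalog: &str,
    schema: &str,
) -> Result<UserInfoRef> {
    // Step 1: verify the credentials and obtain the user's info.
    let user_info = provider.authenticate(id, password).await?;
    // Step 2: verify that this user may access the catalog/schema.
    provider.authorize(catalog, schema, &user_info).await?;
    Ok(user_info)
}
```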
diff --git a/src/catalog/src/kvbackend/manager.rs b/src/catalog/src/kvbackend/manager.rs
index ef44d0de4f..80f9f22e35 100644
--- a/src/catalog/src/kvbackend/manager.rs
+++ b/src/catalog/src/kvbackend/manager.rs
@@ -50,7 +50,7 @@ use crate::CatalogManager;
 ///
 /// The result comes from two source, all the user tables are presented in
 /// a kv-backend which persists the metadata of a table. And system tables
-/// comes from [SystemCatalog], which is static and read-only.
+/// comes from `SystemCatalog`, which is static and read-only.
 #[derive(Clone)]
 pub struct KvBackendCatalogManager {
     // TODO(LFC): Maybe use a real implementation for Standalone mode.
diff --git a/src/common/datasource/src/object_store.rs b/src/common/datasource/src/object_store.rs
index ec068c08bc..a07636554f 100644
--- a/src/common/datasource/src/object_store.rs
+++ b/src/common/datasource/src/object_store.rs
@@ -27,7 +27,7 @@ use crate::error::{self, Result};
 pub const FS_SCHEMA: &str = "FS";
 pub const S3_SCHEMA: &str = "S3";
 
-/// Returns (schema, Option<String>, path)
+/// Returns `(schema, Option<String>, path)`
 pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
     let parsed_url = Url::parse(url);
     match parsed_url {
diff --git a/src/common/function/src/scalars/numpy/interp.rs b/src/common/function/src/scalars/numpy/interp.rs
index ed7345a601..0ae9275032 100644
--- a/src/common/function/src/scalars/numpy/interp.rs
+++ b/src/common/function/src/scalars/numpy/interp.rs
@@ -24,7 +24,7 @@ use datatypes::vectors::{Float64Vector, Vector, VectorRef};
 use datatypes::with_match_primitive_type_id;
 use snafu::{ensure, ResultExt};
 
-/* search the biggest number that smaller than x in xp */
+/// search the biggest number smaller than x in xp
 fn linear_search_ascending_vector(x: Value, xp: &Float64Vector) -> usize {
     for i in 0..xp.len() {
         if x < xp.get(i) {
@@ -34,7 +34,7 @@ fn linear_search_ascending_vector(x: Value, xp: &Float64Vector) -> usize {
     xp.len() - 1
 }
 
-/* search the biggest number that smaller than x in xp */
+/// search the biggest number smaller than x in xp
 fn binary_search_ascending_vector(key: Value, xp: &Float64Vector) -> usize {
     let mut left = 0;
     let mut right = xp.len();
@@ -67,7 +67,8 @@ fn concrete_type_to_primitive_vector(arg: &VectorRef) -> Result<Float64Vector>
     })
 }
 
-/// https://github.com/numpy/numpy/blob/b101756ac02e390d605b2febcded30a1da50cc2c/numpy/core/src/multiarray/compiled_base.c#L491
+/// One-dimensional linear interpolation for monotonically increasing sample points. Refers to
+/// <https://github.com/numpy/numpy/blob/b101756ac02e390d605b2febcded30a1da50cc2c/numpy/core/src/multiarray/compiled_base.c#L491>
 #[allow(unused)]
 pub fn interp(args: &[VectorRef]) -> Result<VectorRef> {
     let mut left = None;
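Aside (illustrative, not part of the patch): the contract the two search helpers above document — index of the last element strictly smaller than `x` in an ascending `xp` — restated over a plain slice. Edge-case handling (no smaller element) is an assumption here, not necessarily what the crate does:

```rust
// Hedged sketch of the documented search contract, on a sorted slice.
fn binary_search_lt(xp: &[f64], x: f64) -> usize {
    // Invariant: everything left of `left` is < x; everything from `right` on is >= x.
    let (mut left, mut right) = (0usize, xp.len());
    while left < right {
        let mid = left + (right - left) / 2;
        if xp[mid] < x {
            left = mid + 1;
        } else {
            right = mid;
        }
    }
    // `left` counts the elements < x, so the last such index is `left - 1`
    // (clamped to 0 when no element is smaller — an assumed convention).
    left.saturating_sub(1)
}
```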
diff --git a/src/common/macro/src/lib.rs b/src/common/macro/src/lib.rs
index 61c3bc0edc..d374440ff3 100644
--- a/src/common/macro/src/lib.rs
+++ b/src/common/macro/src/lib.rs
@@ -23,7 +23,7 @@ use range_fn::process_range_fn;
 use syn::{parse_macro_input, DeriveInput};
 
 /// Make struct implemented trait [AggrFuncTypeStore], which is necessary when writing UDAF.
-/// This derive macro is expect to be used along with attribute macro [as_aggr_func_creator].
+/// This derive macro is expected to be used along with attribute macro [macro@as_aggr_func_creator].
 #[proc_macro_derive(AggrFuncTypeStore)]
 pub fn aggr_func_type_store_derive(input: TokenStream) -> TokenStream {
     let ast = parse_macro_input!(input as DeriveInput);
@@ -59,7 +59,7 @@ pub fn as_aggr_func_creator(args: TokenStream, input: TokenStream) -> TokenStream
 /// ```
 ///
 /// # Arguments
-/// - `name`: The name of the generated [ScalarUDF] struct.
+/// - `name`: The name of the generated `ScalarUDF` struct.
 /// - `ret`: The return type of the generated UDF function.
 /// - `display_name`: The display name of the generated UDF function.
 #[proc_macro_attribute]
diff --git a/src/common/time/src/interval.rs b/src/common/time/src/interval.rs
index fadd9235b7..a7c184c0ec 100644
--- a/src/common/time/src/interval.rs
+++ b/src/common/time/src/interval.rs
@@ -212,7 +212,7 @@ impl Interval {
         IntervalFormat::from(self).to_sql_standard_string()
     }
 
-    /// Interval Type and i128[MonthDayNano] Convert
+    /// Interval Type and i128 [IntervalUnit::MonthDayNano] Convert
     /// v consists of months(i32) | days(i32) | nsecs(i64)
     pub fn from_i128(v: i128) -> Self {
         Interval {
@@ -223,7 +223,7 @@ impl Interval {
         }
     }
 
-    /// `Interval` Type and i64[DayTime] Convert
+    /// `Interval` Type and i64 [IntervalUnit::DayTime] Convert
     /// v consists of days(i32) | milliseconds(i32)
     pub fn from_i64(v: i64) -> Self {
         Interval {
@@ -234,7 +234,7 @@ impl Interval {
         }
     }
 
-    /// `Interval` Type and i32[YearMonth] Convert
+    /// `Interval` Type and i32 [IntervalUnit::YearMonth] Convert
     /// v consists of months(i32)
     pub fn from_i32(v: i32) -> Self {
         Interval {
@@ -302,7 +302,7 @@ impl Display for Interval {
     }
 }
 
-/// https://www.postgresql.org/docs/current/datatype-datetime.html#DATATYPE-INTERVAL-OUTPUT
+/// <https://www.postgresql.org/docs/current/datatype-datetime.html#DATATYPE-INTERVAL-OUTPUT>
 /// support postgres format, iso8601 format and sql standard format
 #[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
 pub struct IntervalFormat {
@@ -392,7 +392,7 @@ impl IntervalFormat {
     }
 
     /// Convert IntervalFormat to sql standard format string
-    /// SQL standard pattern - [years - months] [days] [hours:minutes:seconds[.fractional seconds]]
+    /// SQL standard pattern `- [years - months] [days] [hours:minutes:seconds[.fractional seconds]]`
     /// for example: 1-2 3:4:5.678
     pub fn to_sql_standard_string(self) -> String {
         if self.is_zero() {
@@ -425,7 +425,7 @@ impl IntervalFormat {
     }
 
     /// Convert IntervalFormat to postgres format string
-    /// postgres pattern - [years - months] [days] [hours[:minutes[:seconds[.fractional seconds]]]]
+    /// postgres pattern `- [years - months] [days] [hours[:minutes[:seconds[.fractional seconds]]]]`
     /// for example: -1 year -2 mons +3 days -04:05:06
     pub fn to_postgres_string(&self) -> String {
         if self.is_zero() {
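Aside (illustrative, not part of the patch): the `months(i32) | days(i32) | nsecs(i64)` layout that `from_i128` documents can be made concrete with a small packing sketch. The bit positions (months in the high 32 bits, days next, nanoseconds in the low 64) are an assumption drawn from the comment, not this crate's code:

```rust
// Hedged sketch of the documented MonthDayNano packing.
fn pack_month_day_nano(months: i32, days: i32, nsecs: i64) -> i128 {
    ((months as i128) << 96)
        | (((days as u32) as i128) << 64) // go via u32 to avoid sign extension
        | ((nsecs as u64) as i128)        // likewise via u64 for the low 64 bits
}

fn unpack_month_day_nano(v: i128) -> (i32, i32, i64) {
    ((v >> 96) as i32, (v >> 64) as i32, v as i64)
}
```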
 ///
-/// [RegionAliveKeeper] starts a [CountdownTask] for each region. When deadline is reached,
+/// [RegionAliveKeeper] starts a `CountdownTask` for each region. When deadline is reached,
 /// the region will be closed.
 ///
 /// The deadline is controlled by Metasrv. It works like "lease" for regions: a Datanode submits its
@@ -56,7 +56,7 @@ pub struct RegionAliveKeeper {
     heartbeat_interval_millis: u64,
     started: AtomicBool,
 
-    /// The epoch when [RegionAliveKeepers] is created. It's used to get a monotonically non-decreasing
+    /// The epoch when [RegionAliveKeeper] is created. It's used to get a monotonically non-decreasing
     /// elapsed time when submitting heartbeats to Metasrv (because [Instant] is monotonically
     /// non-decreasing). The heartbeat request will carry the duration since this epoch, and the
     /// duration acts like an "invariant point" for region's keep alive lease.
diff --git a/src/datanode/src/config.rs b/src/datanode/src/config.rs
index d411a49724..0b85eb2e76 100644
--- a/src/datanode/src/config.rs
+++ b/src/datanode/src/config.rs
@@ -212,7 +212,7 @@ impl Default for ObjectStoreConfig {
 #[serde(default)]
 pub struct RegionManifestConfig {
     /// Region manifest checkpoint actions margin.
-    /// Manifest service create a checkpoint every [checkpoint_margin] actions.
+    /// Manifest service create a checkpoint every `checkpoint_margin` actions.
     pub checkpoint_margin: Option<u16>,
     /// Region manifest logs and checkpoints gc task execution duration.
     #[serde(with = "humantime_serde")]
diff --git a/src/datatypes/src/scalars.rs b/src/datatypes/src/scalars.rs
index 78b8aa4687..ceb38964fb 100644
--- a/src/datatypes/src/scalars.rs
+++ b/src/datatypes/src/scalars.rs
@@ -35,7 +35,7 @@ fn get_iter_capacity<T, I: Iterator<Item = T>>(iter: &I) -> usize {
 }
 
 /// Owned scalar value
-/// primitive types, bool, Vec<u8> ...
+/// e.g. primitive types, bool, `Vec<u8>` ...
 pub trait Scalar: 'static + Sized + Default + Any
 where
     for<'a> Self::VectorType: ScalarVector<RefItem<'a> = Self::RefType<'a>>,
diff --git a/src/datatypes/src/vectors/validity.rs b/src/datatypes/src/vectors/validity.rs
index 1b7d977dec..eaa32d8aee 100644
--- a/src/datatypes/src/vectors/validity.rs
+++ b/src/datatypes/src/vectors/validity.rs
@@ -93,7 +93,7 @@ impl Validity {
         }
     }
 
-    /// The number of null slots on this [`Vector`].
+    /// The number of null slots.
     pub fn null_count(&self) -> usize {
         match self.kind {
             ValidityKind::Slots { null_count, .. } => null_count,
diff --git a/src/meta-srv/src/failure_detector.rs b/src/meta-srv/src/failure_detector.rs
index 205cd22f33..86463a0cf5 100644
--- a/src/meta-srv/src/failure_detector.rs
+++ b/src/meta-srv/src/failure_detector.rs
@@ -16,10 +16,10 @@ use std::collections::VecDeque;
 
 /// This is our port of Akka's "[PhiAccrualFailureDetector](https://github.com/akka/akka/blob/main/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala)"
 /// You can find it's document here:
-/// https://doc.akka.io/docs/akka/current/typed/failure-detector.html
+/// <https://doc.akka.io/docs/akka/current/typed/failure-detector.html>
 ///
 /// Implementation of 'The Phi Accrual Failure Detector' by Hayashibara et al. as defined in their
-/// paper: [https://oneofus.la/have-emacs-will-hack/files/HDY04.pdf]
+/// paper: <https://oneofus.la/have-emacs-will-hack/files/HDY04.pdf>
 ///
 /// The suspicion level of failure is given by a value called φ (phi).
 /// The basic idea of the φ failure detector is to express the value of φ on a scale that
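Aside (illustrative, not part of the patch): φ is conventionally computed from the mean and standard deviation of heartbeat inter-arrival times. A sketch following the logistic approximation of the normal CDF used in Akka's implementation — illustrative only, not this module's actual code:

```rust
// phi = -log10(1 - CDF(time_since_last_heartbeat)), with Akka's
// logistic approximation of the normal CDF.
fn phi(time_diff_ms: f64, mean_ms: f64, std_dev_ms: f64) -> f64 {
    let y = (time_diff_ms - mean_ms) / std_dev_ms;
    let e = (-y * (1.5976 + 0.070566 * y * y)).exp();
    if time_diff_ms > mean_ms {
        -(e / (1.0 + e)).log10()
    } else {
        -(1.0 - 1.0 / (1.0 + e)).log10()
    }
}
```

Intuition: the longer a heartbeat is overdue relative to the observed distribution, the higher φ climbs, so a threshold on φ expresses "how wrong am I likely to be if I declare this node dead now".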
diff --git a/src/mito2/Cargo.toml b/src/mito2/Cargo.toml
index 43264ebaf7..37c0ec53d4 100644
--- a/src/mito2/Cargo.toml
+++ b/src/mito2/Cargo.toml
@@ -11,6 +11,7 @@ test = ["common-test-util"]
 [dependencies]
 anymap = "1.0.0-beta.2"
 api.workspace = true
+aquamarine.workspace = true
 async-channel = "1.9"
 async-compat = "0.2"
 async-stream.workspace = true
diff --git a/src/mito2/src/manifest/action.rs b/src/mito2/src/manifest/action.rs
index 2ed8670b51..a39a89cc73 100644
--- a/src/mito2/src/manifest/action.rs
+++ b/src/mito2/src/manifest/action.rs
@@ -142,7 +142,7 @@ impl RegionManifestBuilder {
         self.files.clear();
     }
 
-    /// Check if the builder keeps a [RegionMetadata](crate::metadata::RegionMetadata).
+    /// Check if the builder keeps a [RegionMetadata](store_api::metadata::RegionMetadata).
     pub fn contains_metadata(&self) -> bool {
         self.metadata.is_some()
     }
diff --git a/src/mito2/src/memtable/time_series.rs b/src/mito2/src/memtable/time_series.rs
index b682e8d2ac..1a742b6b7e 100644
--- a/src/mito2/src/memtable/time_series.rs
+++ b/src/mito2/src/memtable/time_series.rs
@@ -524,7 +524,7 @@ impl Values {
         Ok(batch)
     }
 
-    /// Returns a vector of all columns converted to arrow [Array] in [Values].
+    /// Returns a vector of all columns converted to arrow [Array](datatypes::arrow::array::Array) in [Values].
     fn columns(&self) -> Vec<ArrayRef> {
         let mut res = Vec::with_capacity(3 + self.fields.len());
         res.push(self.timestamp.to_arrow_array());
diff --git a/src/mito2/src/region_write_ctx.rs b/src/mito2/src/region_write_ctx.rs
index b09d7230f1..8e83a6442a 100644
--- a/src/mito2/src/region_write_ctx.rs
+++ b/src/mito2/src/region_write_ctx.rs
@@ -115,7 +115,7 @@ impl RegionWriteCtx {
         }
     }
 
-    /// Push [SenderWriteRequest] to the context.
+    /// Push mutation to the context.
     pub(crate) fn push_mutation(&mut self, op_type: i32, rows: Option<Rows>, tx: OptionOutputTx) {
         let num_rows = rows.as_ref().map(|rows| rows.rows.len()).unwrap_or(0);
         self.wal_entry.mutations.push(Mutation {
diff --git a/src/partition/src/columns.rs b/src/partition/src/columns.rs
index 29bee0cab9..bb1572a60d 100644
--- a/src/partition/src/columns.rs
+++ b/src/partition/src/columns.rs
@@ -22,7 +22,7 @@ use store_api::storage::RegionNumber;
 use crate::error::{self, Result};
 use crate::partition::{PartitionBound, PartitionExpr, PartitionRule};
 
-/// A [RangeColumnsPartitionRule] is very similar to [RangePartitionRule] except that it allows
+/// A [RangeColumnsPartitionRule] is very similar to [RangePartitionRule](crate::range::RangePartitionRule) except that it allows
 /// partitioning by multiple columns.
 ///
 /// This rule is generated from create table request, using MySQL's syntax:
diff --git a/src/promql/src/functions.rs b/src/promql/src/functions.rs
index aa89db6564..6a527f6224 100644
--- a/src/promql/src/functions.rs
+++ b/src/promql/src/functions.rs
@@ -55,7 +55,7 @@ pub(crate) fn extract_array(columnar_value: &ColumnarValue) -> Result<ArrayRef>
 pub(crate) fn compensated_sum_inc(inc: f64, sum: f64, mut compensation: f64) -> (f64, f64) {
     let new_sum = sum + inc;
     if sum.abs() >= inc.abs() {
@@ -68,7 +68,7 @@ pub(crate) fn compensated_sum_inc(inc: f64, sum: f64, mut compensation: f64) ->
 
 /// linear_regression performs a least-square linear regression analysis on the
 /// times and values. It return the slope and intercept based on times and values.
-/// Prometheus's implementation: https://github.com/prometheus/prometheus/blob/90b2f7a540b8a70d8d81372e6692dcbb67ccbaaa/promql/functions.go#L793-L837
+/// Prometheus's implementation: <https://github.com/prometheus/prometheus/blob/90b2f7a540b8a70d8d81372e6692dcbb67ccbaaa/promql/functions.go#L793-L837>
 pub(crate) fn linear_regression(
     times: &TimestampMillisecondArray,
     values: &Float64Array,
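Aside (illustrative, not part of the patch): `compensated_sum_inc` applies one step of Kahan (compensated) summation. The whole technique in standalone form, as a sketch rather than the crate's code:

```rust
// Kahan summation: carry the rounding error of each add into the next one.
fn kahan_sum(values: &[f64]) -> f64 {
    let mut sum = 0.0_f64;
    let mut compensation = 0.0_f64; // running low-order error
    for &v in values {
        let y = v - compensation;
        let t = sum + y;
        compensation = (t - sum) - y; // the bits of y lost in `sum + y`
        sum = t;
    }
    sum
}
```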
diff --git a/src/promql/src/functions/aggr_over_time.rs b/src/promql/src/functions/aggr_over_time.rs
index 05428db47c..f57b2d5612 100644
--- a/src/promql/src/functions/aggr_over_time.rs
+++ b/src/promql/src/functions/aggr_over_time.rs
@@ -123,7 +123,7 @@ pub fn present_over_time(_: &TimestampMillisecondArray, values: &Float64Array) -> f64 {
 
 /// the population standard variance of the values in the specified interval.
 /// DataFusion's implementation:
-/// https://github.com/apache/arrow-datafusion/blob/292eb954fc0bad3a1febc597233ba26cb60bda3e/datafusion/physical-expr/src/aggregate/variance.rs#L224-#L241
+/// <https://github.com/apache/arrow-datafusion/blob/292eb954fc0bad3a1febc597233ba26cb60bda3e/datafusion/physical-expr/src/aggregate/variance.rs#L224-#L241>
 #[range_fn(
     name = "StdvarOverTime",
     ret = "Float64Array",
@@ -153,7 +153,7 @@ pub fn stdvar_over_time(_: &TimestampMillisecondArray, values: &Float64Array) -> f64 {
 }
 
 /// the population standard deviation of the values in the specified interval.
-/// Prometheus's implementation: https://github.com/prometheus/prometheus/blob/f55ab2217984770aa1eecd0f2d5f54580029b1c0/promql/functions.go#L556-L569
+/// Prometheus's implementation: <https://github.com/prometheus/prometheus/blob/f55ab2217984770aa1eecd0f2d5f54580029b1c0/promql/functions.go#L556-L569>
 #[range_fn(
     name = "StddevOverTime",
     ret = "Float64Array",
diff --git a/src/promql/src/functions/extrapolate_rate.rs b/src/promql/src/functions/extrapolate_rate.rs
index dd394772b2..0506886be3 100644
--- a/src/promql/src/functions/extrapolate_rate.rs
+++ b/src/promql/src/functions/extrapolate_rate.rs
@@ -48,7 +48,7 @@ pub type Rate = ExtrapolatedRate<true, true>;
 pub type Increase = ExtrapolatedRate<true, false>;
 
 /// Part of the `extrapolatedRate` in Promql,
-/// from https://github.com/prometheus/prometheus/blob/6bdecf377cea8e856509914f35234e948c4fcb80/promql/functions.go#L66
+/// from <https://github.com/prometheus/prometheus/blob/6bdecf377cea8e856509914f35234e948c4fcb80/promql/functions.go#L66>
 #[derive(Debug)]
 pub struct ExtrapolatedRate<const IS_COUNTER: bool, const IS_RATE: bool> {
     /// Range duration in millisecond
     range_length: i64,
 }
 
 impl<const IS_COUNTER: bool, const IS_RATE: bool> ExtrapolatedRate<IS_COUNTER, IS_RATE> {
-    /// Constructor. Other public usage should use [`scalar_udf`] instead.
+    /// Constructor. Other public usage should use [scalar_udf()](ExtrapolatedRate::scalar_udf()) instead.
     fn new(range_length: i64) -> Self {
         Self { range_length }
     }
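Aside (illustrative, not part of the patch): the "extrapolated rate" idea the doc comment points at is to scale the delta observed between the first and last samples up to the full range window. A heavily simplified sketch — it ignores counter resets and Prometheus's extrapolation cutoffs, and the names are hypothetical:

```rust
// Hedged sketch of the extrapolation step only.
fn extrapolated_increase(ts_millis: &[i64], values: &[f64], range_millis: i64) -> Option<f64> {
    let (first_ts, last_ts) = (*ts_millis.first()?, *ts_millis.last()?);
    if last_ts <= first_ts {
        return None; // need at least two distinct sample timestamps
    }
    let sampled_delta = *values.last()? - *values.first()?;
    let sampled_span = (last_ts - first_ts) as f64;
    // Scale the observed delta from the sampled span up to the whole range.
    Some(sampled_delta * (range_millis as f64 / sampled_span))
}
```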
diff --git a/src/promql/src/functions/holt_winters.rs b/src/promql/src/functions/holt_winters.rs
index 9ef47b7723..4480ccc52d 100644
--- a/src/promql/src/functions/holt_winters.rs
+++ b/src/promql/src/functions/holt_winters.rs
@@ -152,7 +152,7 @@ fn calc_trend_value(i: usize, tf: f64, s0: f64, s1: f64, b: f64) -> f64 {
     x + y
 }
 
-/// Refer to https://github.com/prometheus/prometheus/blob/main/promql/functions.go#L299
+/// Refer to <https://github.com/prometheus/prometheus/blob/main/promql/functions.go#L299>
 fn holt_winter_impl(values: &[f64], sf: f64, tf: f64) -> Option<f64> {
     if sf.is_nan() || tf.is_nan() || values.is_empty() {
         return Some(f64::NAN);
diff --git a/src/promql/src/functions/idelta.rs b/src/promql/src/functions/idelta.rs
index 293ffc7976..cf8ad2d0d6 100644
--- a/src/promql/src/functions/idelta.rs
+++ b/src/promql/src/functions/idelta.rs
@@ -28,7 +28,7 @@ use crate::functions::extract_array;
 use crate::range_array::RangeArray;
 
 /// The `funcIdelta` in Promql,
-/// from https://github.com/prometheus/prometheus/blob/6bdecf377cea8e856509914f35234e948c4fcb80/promql/functions.go#L235
+/// from <https://github.com/prometheus/prometheus/blob/6bdecf377cea8e856509914f35234e948c4fcb80/promql/functions.go#L235>
 #[derive(Debug)]
 pub struct IDelta<const IS_RATE: bool> {}
diff --git a/src/promql/src/functions/quantile.rs b/src/promql/src/functions/quantile.rs
index 5118e3fc19..62ff2f2126 100644
--- a/src/promql/src/functions/quantile.rs
+++ b/src/promql/src/functions/quantile.rs
@@ -128,7 +128,7 @@ impl QuantileOverTime {
     }
 }
 
-/// Refer to https://github.com/prometheus/prometheus/blob/6e2905a4d4ff9b47b1f6d201333f5bd53633f921/promql/quantile.go#L357-L386
+/// Refer to <https://github.com/prometheus/prometheus/blob/6e2905a4d4ff9b47b1f6d201333f5bd53633f921/promql/quantile.go#L357-L386>
 fn quantile_impl(values: &[f64], quantile: f64) -> Option<f64> {
     if quantile.is_nan() || values.is_empty() {
         return Some(f64::NAN);
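Aside (illustrative, not part of the patch): the Prometheus quantile the comment links to sorts the samples, takes rank `q * (n - 1)`, and linearly interpolates between the two nearest values. A self-contained sketch of that scheme (assumes no NaN samples; `partial_cmp` would panic otherwise):

```rust
// Hedged sketch of Prometheus-style quantile over a sample vector.
fn quantile_sketch(mut values: Vec<f64>, q: f64) -> Option<f64> {
    if q.is_nan() || values.is_empty() {
        return Some(f64::NAN);
    }
    if q < 0.0 {
        return Some(f64::NEG_INFINITY);
    }
    if q > 1.0 {
        return Some(f64::INFINITY);
    }
    values.sort_by(|a, b| a.partial_cmp(b).unwrap());
    let rank = q * (values.len() - 1) as f64;
    let (lower, weight) = (rank.floor() as usize, rank - rank.floor());
    let upper = (lower + 1).min(values.len() - 1);
    Some(values[lower] * (1.0 - weight) + values[upper] * weight)
}
```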
diff --git a/src/query/src/executor.rs b/src/query/src/executor.rs
index 8fec5b4a2a..c5d5cd707c 100644
--- a/src/query/src/executor.rs
+++ b/src/query/src/executor.rs
@@ -20,7 +20,7 @@ use common_recordbatch::SendableRecordBatchStream;
 use crate::error::Result;
 use crate::query_engine::QueryEngineContext;
 
-/// Executor to run [ExecutionPlan].
+/// Executor to run [PhysicalPlan].
 pub trait QueryExecutor {
     fn execute_stream(
         &self,
diff --git a/src/query/src/range_select/plan.rs b/src/query/src/range_select/plan.rs
index 4ea5b940d4..299d2949a5 100644
--- a/src/query/src/range_select/plan.rs
+++ b/src/query/src/range_select/plan.rs
@@ -495,8 +495,8 @@ struct RangeSelectStream {
     /// key: time series's hash value
     /// value: time series's state on different align_ts
     series_map: HashMap<u64, SeriesState>,
-    /// key: (hash of by rows, align_ts)
-    /// value: [row_ids]
+    /// key: `(hash of by rows, align_ts)`
+    /// value: `[row_ids]`
     /// It is used to record the data that needs to be aggregated in each time slot during the data update process
     modify_map: HashMap<(u64, Millisecond), Vec<u32>>,
     /// The number of rows of the final output
diff --git a/src/script/src/python/rspython/builtins.rs b/src/script/src/python/rspython/builtins.rs
index ba4fc953ec..6b5e88f830 100644
--- a/src/script/src/python/rspython/builtins.rs
+++ b/src/script/src/python/rspython/builtins.rs
@@ -223,7 +223,7 @@ fn all_to_f64(col: DFColValue, vm: &VirtualMachine) -> PyResult<DFColValue> {
 }
 
 /// use to bind to Data Fusion's UDF function
-/// P.S: seems due to proc macro issues, can't just use #[pyfunction] in here
+/// P.S: seems due to proc macro issues, can't just use `#[pyfunction]` in here
 macro_rules! bind_call_unary_math_function {
     ($DF_FUNC: ident, $vm: ident $(,$ARG: ident)*) => {
         fn inner_fn($($ARG: PyObjectRef,)* vm: &VirtualMachine) -> PyResult<PyVector> {
diff --git a/src/servers/src/grpc.rs b/src/servers/src/grpc.rs
index 3307ba55db..5d5582de7f 100644
--- a/src/servers/src/grpc.rs
+++ b/src/servers/src/grpc.rs
@@ -69,11 +69,11 @@ pub struct GrpcServer {
     serve_state: Mutex<Option<Receiver<Result<()>>>>,
 
     // handlers
-    /// Handler for [GreptimeDatabase] service.
+    /// Handler for [DatabaseService] service.
     database_handler: Option<GreptimeRequestHandler>,
     /// Handler for Prometheus-compatible PromQL queries ([PrometheusGateway]). Only present for frontend server.
     prometheus_handler: Option<PrometheusHandlerRef>,
-    /// Handler for [FlightService].
+    /// Handler for [FlightService](arrow_flight::flight_service_server::FlightService).
     flight_handler: Option<FlightCraftRef>,
     /// Handler for [RegionServer].
     region_server_handler: Option<RegionServerHandlerRef>,
diff --git a/src/servers/src/opentsdb/handler.rs b/src/servers/src/opentsdb/handler.rs
index fd41297e48..9534f5bc00 100644
--- a/src/servers/src/opentsdb/handler.rs
+++ b/src/servers/src/opentsdb/handler.rs
@@ -25,7 +25,7 @@ use crate::query_handler::OpentsdbProtocolHandlerRef;
 use crate::shutdown::Shutdown;
 
 /// Per-connection handler. Reads requests from `connection` and applies the OpenTSDB metric to
-/// [OpentsdbLineProtocolHandler].
+/// [OpentsdbProtocolHandlerRef].
 pub(crate) struct Handler {
     query_handler: OpentsdbProtocolHandlerRef,
diff --git a/src/servers/src/otlp.rs b/src/servers/src/otlp.rs
index 2510263ed7..8af21fee7b 100644
--- a/src/servers/src/otlp.rs
+++ b/src/servers/src/otlp.rs
@@ -26,7 +26,7 @@ const GREPTIME_VALUE: &str = "greptime_value";
 
 /// Normalize otlp instrumentation, metric and attribute names
 ///
-/// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#instrument-name-syntax
+/// <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#instrument-name-syntax>
 /// - since the name are case-insensitive, we transform them to lowercase for
 ///   better sql usability
 /// - replace `.` and `-` with `_`
@@ -37,7 +37,7 @@ fn normalize_otlp_name(name: &str) -> String {
 /// Convert OpenTelemetry metrics to GreptimeDB insert requests
 ///
 /// See
-/// https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto#L162
+/// <https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto#L162>
 /// for data structure of OTLP metrics.
 ///
 /// Returns `InsertRequests` and total number of rows to ingest
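Aside (illustrative, not part of the patch): the normalization rule the `normalize_otlp_name` doc comment states — lowercase, then map `.` and `-` to `_` — restated as a minimal sketch:

```rust
// Minimal restatement of the documented rule; the crate's own helper
// is `normalize_otlp_name`.
fn normalize(name: &str) -> String {
    name.to_lowercase().replace(|c: char| c == '.' || c == '-', "_")
}

// e.g. normalize("Http.Server-Duration") == "http_server_duration"
```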
diff --git a/src/servers/src/prom_store.rs b/src/servers/src/prom_store.rs
index 494e650549..2a745973ee 100644
--- a/src/servers/src/prom_store.rs
+++ b/src/servers/src/prom_store.rs
@@ -189,7 +189,7 @@ impl PartialOrd for TimeSeriesId {
 }
 
 /// Collect each row's timeseries id
-/// This processing is ugly, hope https://github.com/GreptimeTeam/greptimedb/issues/336 making some progress in future.
+/// This processing is ugly, hope <https://github.com/GreptimeTeam/greptimedb/issues/336> makes some progress in the future.
 fn collect_timeseries_ids(table_name: &str, recordbatch: &RecordBatch) -> Vec<TimeSeriesId> {
     let row_count = recordbatch.num_rows();
     let mut timeseries_ids = Vec::with_capacity(row_count);
diff --git a/src/sql/src/parsers/tql_parser.rs b/src/sql/src/parsers/tql_parser.rs
index d77f7d926d..5b8db3ff40 100644
--- a/src/sql/src/parsers/tql_parser.rs
+++ b/src/sql/src/parsers/tql_parser.rs
@@ -29,9 +29,9 @@ const EXPLAIN: &str = "EXPLAIN";
 use sqlparser::parser::Parser;
 
 /// TQL extension parser, including:
-/// - TQL EVAL <query>
-/// - TQL EXPLAIN <query>
-/// - TQL ANALYZE <query>
+/// - `TQL EVAL <query>`
+/// - `TQL EXPLAIN <query>`
+/// - `TQL ANALYZE <query>`
 impl<'a> ParserContext<'a> {
     pub(crate) fn parse_tql(&mut self) -> Result<Statement> {
         let _ = self.parser.next_token();
diff --git a/src/sql/src/parsers/truncate_parser.rs b/src/sql/src/parsers/truncate_parser.rs
index 2f6fcd7af4..e3a31f2db2 100644
--- a/src/sql/src/parsers/truncate_parser.rs
+++ b/src/sql/src/parsers/truncate_parser.rs
@@ -20,7 +20,7 @@ use crate::parser::ParserContext;
 use crate::statements::statement::Statement;
 use crate::statements::truncate::TruncateTable;
 
-/// TRUNCATE [TABLE] table_name;
+/// `TRUNCATE [TABLE] table_name;`
 impl<'a> ParserContext<'a> {
     pub(crate) fn parse_truncate(&mut self) -> Result<Statement> {
         let _ = self.parser.next_token();
diff --git a/src/storage/src/flush.rs b/src/storage/src/flush.rs
index ebb41d7af9..05c8116954 100644
--- a/src/storage/src/flush.rs
+++ b/src/storage/src/flush.rs
@@ -106,7 +106,7 @@ impl SizeBasedStrategy {
     /// Returns whether to trigger an engine level flush.
     ///
     /// Inspired by RocksDB's WriteBufferManager.
-    /// https://github.com/facebook/rocksdb/blob/main/include/rocksdb/write_buffer_manager.h#L94
+    /// <https://github.com/facebook/rocksdb/blob/main/include/rocksdb/write_buffer_manager.h#L94>
     fn should_flush_engine(&self) -> bool {
         // We only check global limit when it is Some.
         let Some(global_write_buffer_size) = self.global_write_buffer_size else {
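Aside (illustrative, not part of the patch): the WriteBufferManager heuristic the comment references can be sketched as below. The thresholds mirror RocksDB's defaults (mutable share at 7/8 of the budget, flush when total usage hits the budget while at least half is still mutable); this crate's exact tuning may differ:

```rust
// Hedged sketch of an engine-level flush check over a global write buffer budget.
fn should_flush(mutable_bytes: usize, total_bytes: usize, global_limit: Option<usize>) -> bool {
    let Some(limit) = global_limit else {
        return false; // engine-level flush is disabled without a global budget
    };
    let mutable_limit = limit * 7 / 8;
    // Flush if mutable memtables alone exceed their share, or if overall usage
    // hits the budget while at least half of it is still mutable (i.e. flushable).
    mutable_bytes > mutable_limit || (total_bytes >= limit && mutable_bytes >= limit / 2)
}
```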
diff --git a/src/storage/src/memtable.rs b/src/storage/src/memtable.rs
index 620cbc7af3..6a9b269e18 100644
--- a/src/storage/src/memtable.rs
+++ b/src/storage/src/memtable.rs
@@ -44,7 +44,7 @@ pub type MemtableId = u32;
 #[derive(Debug, Default)]
 pub struct MemtableStats {
     /// The estimated bytes allocated by this memtable from heap. Result
-    /// of this method may be larger than the estimated based on [`num_rows`] because
+    /// of this method may be larger than the estimated based on `num_rows` because
     /// of the implementor's pre-alloc behavior.
     pub estimated_bytes: usize,
     /// The max timestamp that this memtable contains.
@@ -267,7 +267,7 @@ impl Drop for AllocTracker {
     }
 }
 
-/// Default memtable builder that builds [BTreeMemtable].
+/// Default memtable builder that builds `BTreeMemtable`.
 #[derive(Debug, Default)]
 pub struct DefaultMemtableBuilder {
     memtable_id: AtomicU32,
diff --git a/src/storage/src/memtable/inserter.rs b/src/storage/src/memtable/inserter.rs
index 3e18a4063a..5192451942 100644
--- a/src/storage/src/memtable/inserter.rs
+++ b/src/storage/src/memtable/inserter.rs
@@ -21,7 +21,7 @@ use crate::memtable::KeyValues;
 use crate::metrics::MEMTABLE_WRITE_ELAPSED;
 use crate::write_batch::{Mutation, Payload};
 
-/// Wraps logic of inserting key/values in [WriteBatch] to [Memtable].
+/// Wraps logic of inserting key/values in [WriteBatch](crate::write_batch::WriteBatch) to [Memtable](crate::memtable::Memtable).
 pub struct Inserter {
     /// Sequence of the batch to be inserted.
     sequence: SequenceNumber,
@@ -39,7 +39,7 @@ impl Inserter {
     /// Insert write batch payload into memtable.
     ///
-    /// Won't do schema validation if not configured. Caller (mostly the [`RegionWriter`]) should ensure the
+    /// Won't do schema validation if not configured. Caller (mostly the `RegionWriter`) should ensure the
     /// schemas of `memtable` are consistent with `payload`'s.
     pub fn insert_memtable(&mut self, payload: &Payload, memtable: &MemtableRef) -> Result<()> {
         let _timer = common_telemetry::timer!(MEMTABLE_WRITE_ELAPSED);
diff --git a/src/storage/src/read.rs b/src/storage/src/read.rs
index 7f3637da43..fb8710c05f 100644
--- a/src/storage/src/read.rs
+++ b/src/storage/src/read.rs
@@ -140,7 +140,7 @@ pub trait BatchOp {
     /// being masked out.
     fn filter(&self, batch: &Batch, filter: &BooleanVector) -> Result<Batch>;
 
-    /// Unselect deleted rows according to the [`OpType`](store_api::storage::OpType).
+    /// Unselect deleted rows according to the [`OpType`](api::v1::OpType).
     ///
     /// # Panics
     /// Panics if
diff --git a/src/store-api/Cargo.toml b/src/store-api/Cargo.toml
index 059efa54be..b0710a76e8 100644
--- a/src/store-api/Cargo.toml
+++ b/src/store-api/Cargo.toml
@@ -6,6 +6,7 @@ license.workspace = true
 
 [dependencies]
 api = { workspace = true }
+aquamarine.workspace = true
 async-trait.workspace = true
 bytes = "1.1"
 common-base = { workspace = true }
diff --git a/src/store-api/src/metadata.rs b/src/store-api/src/metadata.rs
index 1638477be2..0798478a1e 100644
--- a/src/store-api/src/metadata.rs
+++ b/src/store-api/src/metadata.rs
@@ -50,7 +50,7 @@ pub struct ColumnMetadata {
 }
 
 impl ColumnMetadata {
-    /// Construct `Self` from protobuf struct [ColumnDef]
+    /// Construct `Self` from protobuf struct [RegionColumnDef]
     pub fn try_from_column_def(column_def: RegionColumnDef) -> Result<Self> {
         let column_id = column_def.column_id;