docs: fix cargo doc errors and warnings (#2421)

* docs: fix cargo doc warnings and errors

* docs: fix warnings

* docs: fix warnings

* chore: rm src/common/function-macro/src/lib.rs
Author: Yingwen
Date: 2023-09-17 19:45:15 +08:00
Committed by: GitHub
Parent: 92824d1c66
Commit: 4a82926d72
41 changed files with 100 additions and 61 deletions

Cargo.lock (generated, 35 changed lines)

@@ -233,6 +233,20 @@ version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3f9eb837c6a783fbf002e3e5cc7925a3aa6893d6d42f9169517528983777590"
[[package]]
name = "aquamarine"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df752953c49ce90719c7bf1fc587bc8227aed04732ea0c0f85e5397d7fdbd1a1"
dependencies = [
"include_dir",
"itertools 0.10.5",
"proc-macro-error",
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "arc-swap"
version = "1.6.0"
@@ -4420,6 +4434,25 @@ dependencies = [
"hashbrown 0.12.3",
]
[[package]]
name = "include_dir"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18762faeff7122e89e0857b02f7ce6fcc0d101d5e9ad2ad7846cc01d61b7f19e"
dependencies = [
"include_dir_macros",
]
[[package]]
name = "include_dir_macros"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f"
dependencies = [
"proc-macro2",
"quote",
]
[[package]]
name = "indexmap"
version = "1.9.3"
@@ -5422,6 +5455,7 @@ version = "0.4.0-nightly"
dependencies = [
"anymap",
"api",
"aquamarine",
"async-channel",
"async-compat",
"async-stream",
@@ -9194,6 +9228,7 @@ name = "store-api"
version = "0.4.0-nightly"
dependencies = [
"api",
"aquamarine",
"async-stream",
"async-trait",
"bytes",


@@ -61,6 +61,7 @@ edition = "2021"
license = "Apache-2.0"
[workspace.dependencies]
aquamarine = "0.3"
arrow = { version = "43.0" }
etcd-client = "0.11"
arrow-array = "43.0"


@@ -26,7 +26,7 @@ use crate::{UserInfoRef, UserProviderRef};
pub(crate) const DEFAULT_USERNAME: &str = "greptime";
/// construct a [`UserInfo`] impl with name
/// construct a [`UserInfo`](crate::user_info::UserInfo) impl with name
/// use default username `greptime` if None is provided
pub fn userinfo_by_name(username: Option<String>) -> UserInfoRef {
DefaultUserInfo::with_name(username.unwrap_or_else(|| DEFAULT_USERNAME.to_string()))


@@ -22,15 +22,15 @@ use crate::UserInfoRef;
pub trait UserProvider: Send + Sync {
fn name(&self) -> &str;
/// [`authenticate`] checks whether a user is valid and allowed to access the database.
/// Checks whether a user is valid and allowed to access the database.
async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef>;
/// [`authorize`] checks whether a connection request
/// Checks whether a connection request
/// from a certain user to a certain catalog/schema is legal.
/// This method should be called after [`authenticate`].
/// This method should be called after [authenticate()](UserProvider::authenticate()).
async fn authorize(&self, catalog: &str, schema: &str, user_info: &UserInfoRef) -> Result<()>;
/// [`auth`] is a combination of [`authenticate`] and [`authorize`].
/// Combination of [authenticate()](UserProvider::authenticate()) and [authorize()](UserProvider::authorize()).
/// In most cases it's preferred for both convenience and performance.
async fn auth(
&self,

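The trait above spells out the relationship: `auth` is just `authenticate` followed by `authorize`, bundled so callers make a single call per connection request. Below is a minimal, non-async sketch of that shape, with stand-in types and a hypothetical hard-coded provider rather than the crate's real definitions:

```rust
// Stand-in error/result types; the real crate has its own error enum.
type Result<T> = std::result::Result<T, String>;

#[derive(Debug)]
struct UserInfo {
    username: String,
}

trait UserProvider {
    fn authenticate(&self, username: &str, password: &str) -> Result<UserInfo>;
    fn authorize(&self, catalog: &str, schema: &str, user: &UserInfo) -> Result<()>;

    /// Combination of `authenticate` and `authorize`: one call per connection request.
    fn auth(
        &self,
        username: &str,
        password: &str,
        catalog: &str,
        schema: &str,
    ) -> Result<UserInfo> {
        let user = self.authenticate(username, password)?;
        self.authorize(catalog, schema, &user)?;
        Ok(user)
    }
}

/// Hypothetical provider backed by a single hard-coded credential.
struct StaticCredentialProvider;

impl UserProvider for StaticCredentialProvider {
    fn authenticate(&self, username: &str, password: &str) -> Result<UserInfo> {
        if username == "greptime" && password == "secret" {
            Ok(UserInfo { username: username.to_string() })
        } else {
            Err("invalid credentials".to_string())
        }
    }

    fn authorize(&self, catalog: &str, _schema: &str, _user: &UserInfo) -> Result<()> {
        if catalog == "greptime" {
            Ok(())
        } else {
            Err("access to this catalog is not allowed".to_string())
        }
    }
}

fn main() {
    let provider = StaticCredentialProvider;
    assert!(provider.auth("greptime", "secret", "greptime", "public").is_ok());
    assert!(provider.auth("greptime", "wrong", "greptime", "public").is_err());
}
```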

@@ -50,7 +50,7 @@ use crate::CatalogManager;
///
/// The result comes from two source, all the user tables are presented in
/// a kv-backend which persists the metadata of a table. And system tables
/// comes from [SystemCatalog], which is static and read-only.
/// comes from `SystemCatalog`, which is static and read-only.
#[derive(Clone)]
pub struct KvBackendCatalogManager {
// TODO(LFC): Maybe use a real implementation for Standalone mode.


@@ -27,7 +27,7 @@ use crate::error::{self, Result};
pub const FS_SCHEMA: &str = "FS";
pub const S3_SCHEMA: &str = "S3";
/// Returns (schema, Option<host>, path)
/// Returns `(schema, Option<host>, path)`
pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
let parsed_url = Url::parse(url);
match parsed_url {


@@ -24,7 +24,7 @@ use datatypes::vectors::{Float64Vector, Vector, VectorRef};
use datatypes::with_match_primitive_type_id;
use snafu::{ensure, ResultExt};
/* search the biggest number that smaller than x in xp */
/// search the biggest number that smaller than x in xp
fn linear_search_ascending_vector(x: Value, xp: &Float64Vector) -> usize {
for i in 0..xp.len() {
if x < xp.get(i) {
@@ -34,7 +34,7 @@ fn linear_search_ascending_vector(x: Value, xp: &Float64Vector) -> usize {
xp.len() - 1
}
/* search the biggest number that smaller than x in xp */
/// search the biggest number that smaller than x in xp
fn binary_search_ascending_vector(key: Value, xp: &Float64Vector) -> usize {
let mut left = 0;
let mut right = xp.len();
@@ -67,7 +67,8 @@ fn concrete_type_to_primitive_vector(arg: &VectorRef) -> Result<Float64Vector> {
})
}
/// https://github.com/numpy/numpy/blob/b101756ac02e390d605b2febcded30a1da50cc2c/numpy/core/src/multiarray/compiled_base.c#L491
/// One-dimensional linear interpolation for monotonically increasing sample points. Refers to
/// <https://github.com/numpy/numpy/blob/b101756ac02e390d605b2febcded30a1da50cc2c/numpy/core/src/multiarray/compiled_base.c#L491>
#[allow(unused)]
pub fn interp(args: &[VectorRef]) -> Result<VectorRef> {
let mut left = None;

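The `interp` function above follows numpy's one-dimensional linear interpolation, with the two search helpers locating the interval that brackets `x`. Here is a self-contained sketch over plain `f64` slices (rather than the crate's `Float64Vector`), assuming the sample points are monotonically increasing:

```rust
/// Minimal sketch of 1-D linear interpolation over monotonically increasing
/// sample points (xp, fp), in the spirit of numpy.interp.
fn interp_scalar(x: f64, xp: &[f64], fp: &[f64]) -> f64 {
    assert_eq!(xp.len(), fp.len());
    assert!(!xp.is_empty());
    // Clamp to the boundary values outside the sampled range.
    if x <= xp[0] {
        return fp[0];
    }
    if x >= xp[xp.len() - 1] {
        return fp[fp.len() - 1];
    }
    // Binary search for the rightmost sample point not greater than x.
    let i = xp.partition_point(|&p| p <= x) - 1;
    let slope = (fp[i + 1] - fp[i]) / (xp[i + 1] - xp[i]);
    fp[i] + slope * (x - xp[i])
}

fn main() {
    let xp = [1.0, 2.0, 3.0];
    let fp = [3.0, 2.0, 0.0];
    // Halfway between (2.0, 2.0) and (3.0, 0.0) gives 1.0.
    assert_eq!(interp_scalar(2.5, &xp, &fp), 1.0);
}
```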

@@ -23,7 +23,7 @@ use range_fn::process_range_fn;
use syn::{parse_macro_input, DeriveInput};
/// Make struct implemented trait [AggrFuncTypeStore], which is necessary when writing UDAF.
/// This derive macro is expect to be used along with attribute macro [as_aggr_func_creator].
/// This derive macro is expect to be used along with attribute macro [macro@as_aggr_func_creator].
#[proc_macro_derive(AggrFuncTypeStore)]
pub fn aggr_func_type_store_derive(input: TokenStream) -> TokenStream {
let ast = parse_macro_input!(input as DeriveInput);
@@ -59,7 +59,7 @@ pub fn as_aggr_func_creator(args: TokenStream, input: TokenStream) -> TokenStrea
/// ```
///
/// # Arguments
/// - `name`: The name of the generated [ScalarUDF] struct.
/// - `name`: The name of the generated `ScalarUDF` struct.
/// - `ret`: The return type of the generated UDF function.
/// - `display_name`: The display name of the generated UDF function.
#[proc_macro_attribute]


@@ -212,7 +212,7 @@ impl Interval {
IntervalFormat::from(self).to_sql_standard_string()
}
/// Interval Type and i128[MonthDayNano] Convert
/// Interval Type and i128 [IntervalUnit::MonthDayNano] Convert
/// v consists of months(i32) | days(i32) | nsecs(i64)
pub fn from_i128(v: i128) -> Self {
Interval {
@@ -223,7 +223,7 @@ impl Interval {
}
}
/// `Interval` Type and i64[DayTime] Convert
/// `Interval` Type and i64 [IntervalUnit::DayTime] Convert
/// v consists of days(i32) | milliseconds(i32)
pub fn from_i64(v: i64) -> Self {
Interval {
@@ -234,7 +234,7 @@ impl Interval {
}
}
/// `Interval` Type and i32[YearMonth] Convert
/// `Interval` Type and i32 [IntervalUnit::YearMonth] Convert
/// v consists of months(i32)
pub fn from_i32(v: i32) -> Self {
Interval {
@@ -302,7 +302,7 @@ impl Display for Interval {
}
}
/// https://www.postgresql.org/docs/current/datatype-datetime.html#DATATYPE-INTERVAL-OUTPUT
/// <https://www.postgresql.org/docs/current/datatype-datetime.html#DATATYPE-INTERVAL-OUTPUT>
/// support postgres format, iso8601 format and sql standard format
#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
pub struct IntervalFormat {
@@ -392,7 +392,7 @@ impl IntervalFormat {
}
/// Convert IntervalFormat to sql standard format string
/// SQL standard pattern - [years - months] [days] [hours:minutes:seconds[.fractional seconds]]
/// SQL standard pattern `- [years - months] [days] [hours:minutes:seconds[.fractional seconds]]`
/// for example: 1-2 3:4:5.678
pub fn to_sql_standard_string(self) -> String {
if self.is_zero() {
@@ -425,7 +425,7 @@ impl IntervalFormat {
}
/// Convert IntervalFormat to postgres format string
/// postgres pattern - [years - months] [days] [hours[:minutes[:seconds[.fractional seconds]]]]
/// postgres pattern `- [years - months] [days] [hours[:minutes[:seconds[.fractional seconds]]]]`
/// for example: -1 year -2 mons +3 days -04:05:06
pub fn to_postgres_string(&self) -> String {
if self.is_zero() {

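The `from_i128` / `from_i64` / `from_i32` conversions above all unpack fields from a single integer; the i128 case packs months, days and nanoseconds as described in its doc comment. A hypothetical sketch of that bit layout follows (high 32 bits months, next 32 bits days, low 64 bits nanoseconds; the crate's actual encoding is defined by its Interval and arrow types):

```rust
/// Pack months | days | nanoseconds into one i128: bits 96..128 hold months,
/// bits 64..96 hold days, bits 0..64 hold nanoseconds.
fn pack_month_day_nano(months: i32, days: i32, nsecs: i64) -> i128 {
    ((months as i128) << 96) | (((days as u32) as i128) << 64) | ((nsecs as u64) as i128)
}

/// Reverse of the packing above.
fn unpack_month_day_nano(v: i128) -> (i32, i32, i64) {
    let months = (v >> 96) as i32;
    let days = (v >> 64) as i32;
    let nsecs = v as i64;
    (months, days, nsecs)
}

fn main() {
    let v = pack_month_day_nano(14, 3, 5_000_000_000);
    assert_eq!(unpack_month_day_nano(v), (14, 3, 5_000_000_000));
}
```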

@@ -52,7 +52,7 @@ impl TimeZone {
/// scope.
///
/// String examples are available as described in
/// https://dev.mysql.com/doc/refman/8.0/en/time-zone-support.html
/// <https://dev.mysql.com/doc/refman/8.0/en/time-zone-support.html>
///
/// - `SYSTEM`
/// - Offset to UTC: `+08:00` , `-11:30`


@@ -38,9 +38,9 @@ use crate::region_server::RegionServer;
const MAX_CLOSE_RETRY_TIMES: usize = 10;
/// [RegionAliveKeeper] manages all [CountdownTaskHandle]s.
/// [RegionAliveKeeper] manages all `CountdownTaskHandles`.
///
/// [RegionAliveKeeper] starts a [CountdownTask] for each region. When deadline is reached,
/// [RegionAliveKeeper] starts a `CountdownTask` for each region. When deadline is reached,
/// the region will be closed.
///
/// The deadline is controlled by Metasrv. It works like "lease" for regions: a Datanode submits its
@@ -56,7 +56,7 @@ pub struct RegionAliveKeeper {
heartbeat_interval_millis: u64,
started: AtomicBool,
/// The epoch when [RegionAliveKeepers] is created. It's used to get a monotonically non-decreasing
/// The epoch when [RegionAliveKeeper] is created. It's used to get a monotonically non-decreasing
/// elapsed time when submitting heartbeats to Metasrv (because [Instant] is monotonically
/// non-decreasing). The heartbeat request will carry the duration since this epoch, and the
/// duration acts like an "invariant point" for region's keep alive lease.


@@ -212,7 +212,7 @@ impl Default for ObjectStoreConfig {
#[serde(default)]
pub struct RegionManifestConfig {
/// Region manifest checkpoint actions margin.
/// Manifest service create a checkpoint every [checkpoint_margin] actions.
/// Manifest service create a checkpoint every `checkpoint_margin` actions.
pub checkpoint_margin: Option<u16>,
/// Region manifest logs and checkpoints gc task execution duration.
#[serde(with = "humantime_serde")]


@@ -35,7 +35,7 @@ fn get_iter_capacity<T, I: Iterator<Item = T>>(iter: &I) -> usize {
}
/// Owned scalar value
/// primitive types, bool, Vec<u8> ...
/// e.g. primitive types, bool, `Vec<u8>` ...
pub trait Scalar: 'static + Sized + Default + Any
where
for<'a> Self::VectorType: ScalarVector<RefItem<'a> = Self::RefType<'a>>,


@@ -93,7 +93,7 @@ impl Validity {
}
}
/// The number of null slots on this [`Vector`].
/// The number of null slots.
pub fn null_count(&self) -> usize {
match self.kind {
ValidityKind::Slots { null_count, .. } => null_count,


@@ -16,10 +16,10 @@ use std::collections::VecDeque;
/// This is our port of Akka's "[PhiAccrualFailureDetector](https://github.com/akka/akka/blob/main/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala)"
/// You can find it's document here:
/// https://doc.akka.io/docs/akka/current/typed/failure-detector.html
/// <https://doc.akka.io/docs/akka/current/typed/failure-detector.html>
///
/// Implementation of 'The Phi Accrual Failure Detector' by Hayashibara et al. as defined in their
/// paper: [https://oneofus.la/have-emacs-will-hack/files/HDY04.pdf]
/// paper: <https://oneofus.la/have-emacs-will-hack/files/HDY04.pdf>
///
/// The suspicion level of failure is given by a value called φ (phi).
/// The basic idea of the φ failure detector is to express the value of φ on a scale that

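For intuition, φ above is -log10 of the probability that a heartbeat arrives even later than the current silence, given the observed mean and standard deviation of inter-arrival times. The sketch below uses the logistic approximation of the normal CDF that Akka's detector applies; the constants come from that approximation and this is not GreptimeDB's exact code:

```rust
/// φ = -log10(P(inter-arrival time > time since last heartbeat)), with the normal
/// CDF approximated by a logistic formula.
fn phi(time_diff_ms: f64, mean_ms: f64, std_dev_ms: f64) -> f64 {
    let y = (time_diff_ms - mean_ms) / std_dev_ms;
    let e = (-y * (1.5976 + 0.070566 * y * y)).exp();
    if time_diff_ms > mean_ms {
        -(e / (1.0 + e)).log10()
    } else {
        -(1.0 - 1.0 / (1.0 + e)).log10()
    }
}

fn main() {
    // Heartbeats normally arrive every ~1000 ms with ~100 ms jitter; the longer we
    // go without one, the higher the suspicion level φ climbs.
    for elapsed in [1000.0, 1500.0, 2000.0] {
        println!("elapsed {elapsed} ms -> phi = {:.2}", phi(elapsed, 1000.0, 100.0));
    }
}
```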

@@ -11,6 +11,7 @@ test = ["common-test-util"]
[dependencies]
anymap = "1.0.0-beta.2"
api.workspace = true
aquamarine.workspace = true
async-channel = "1.9"
async-compat = "0.2"
async-stream.workspace = true


@@ -142,7 +142,7 @@ impl RegionManifestBuilder {
self.files.clear();
}
/// Check if the builder keeps a [RegionMetadata](crate::metadata::RegionMetadata).
/// Check if the builder keeps a [RegionMetadata](store_api::metadata::RegionMetadata).
pub fn contains_metadata(&self) -> bool {
self.metadata.is_some()
}


@@ -524,7 +524,7 @@ impl Values {
Ok(batch)
}
/// Returns a vector of all columns converted to arrow [Array] in [Values].
/// Returns a vector of all columns converted to arrow [Array](datatypes::arrow::array::Array) in [Values].
fn columns(&self) -> Vec<ArrayRef> {
let mut res = Vec::with_capacity(3 + self.fields.len());
res.push(self.timestamp.to_arrow_array());


@@ -115,7 +115,7 @@ impl RegionWriteCtx {
}
}
/// Push [SenderWriteRequest] to the context.
/// Push mutation to the context.
pub(crate) fn push_mutation(&mut self, op_type: i32, rows: Option<Rows>, tx: OptionOutputTx) {
let num_rows = rows.as_ref().map(|rows| rows.rows.len()).unwrap_or(0);
self.wal_entry.mutations.push(Mutation {


@@ -22,7 +22,7 @@ use store_api::storage::RegionNumber;
use crate::error::{self, Result};
use crate::partition::{PartitionBound, PartitionExpr, PartitionRule};
/// A [RangeColumnsPartitionRule] is very similar to [RangePartitionRule] except that it allows
/// A [RangeColumnsPartitionRule] is very similar to [RangePartitionRule](crate::range::RangePartitionRule) except that it allows
/// partitioning by multiple columns.
///
/// This rule is generated from create table request, using MySQL's syntax:


@@ -55,7 +55,7 @@ pub(crate) fn extract_array(columnar_value: &ColumnarValue) -> Result<ArrayRef,
/// that reduces the numerical error further in cases
/// where the numbers being summed have a large difference in magnitude
/// Prometheus's implementation:
/// https://github.com/prometheus/prometheus/blob/f55ab2217984770aa1eecd0f2d5f54580029b1c0/promql/functions.go#L782)
/// <https://github.com/prometheus/prometheus/blob/f55ab2217984770aa1eecd0f2d5f54580029b1c0/promql/functions.go#L782>
pub(crate) fn compensated_sum_inc(inc: f64, sum: f64, mut compensation: f64) -> (f64, f64) {
let new_sum = sum + inc;
if sum.abs() >= inc.abs() {
@@ -68,7 +68,7 @@ pub(crate) fn compensated_sum_inc(inc: f64, sum: f64, mut compensation: f64) ->
/// linear_regression performs a least-square linear regression analysis on the
/// times and values. It return the slope and intercept based on times and values.
/// Prometheus's implementation: https://github.com/prometheus/prometheus/blob/90b2f7a540b8a70d8d81372e6692dcbb67ccbaaa/promql/functions.go#L793-L837
/// Prometheus's implementation: <https://github.com/prometheus/prometheus/blob/90b2f7a540b8a70d8d81372e6692dcbb67ccbaaa/promql/functions.go#L793-L837>
pub(crate) fn linear_regression(
times: &TimestampMillisecondArray,
values: &Float64Array,

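The compensated sum above is Kahan-style summation: the rounding error lost by each addition is accumulated in a separate compensation term and added back at the end. A standalone sketch mirroring the shape of `compensated_sum_inc`:

```rust
/// Kahan-style compensated addition: returns the new running sum together with
/// the accumulated rounding-error compensation term.
fn compensated_sum_inc(inc: f64, sum: f64, mut compensation: f64) -> (f64, f64) {
    let new_sum = sum + inc;
    if sum.abs() >= inc.abs() {
        compensation += (sum - new_sum) + inc;
    } else {
        compensation += (inc - new_sum) + sum;
    }
    (new_sum, compensation)
}

fn main() {
    let values = [1e16, 1.0, -1e16];
    let (mut sum, mut comp) = (0.0, 0.0);
    for v in values {
        (sum, comp) = compensated_sum_inc(v, sum, comp);
    }
    // Naive summation loses the 1.0; the compensation term recovers it.
    println!("naive = {}, compensated = {}", values.iter().sum::<f64>(), sum + comp);
}
```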

@@ -123,7 +123,7 @@ pub fn present_over_time(_: &TimestampMillisecondArray, values: &Float64Array) -
/// the population standard variance of the values in the specified interval.
/// DataFusion's implementation:
/// https://github.com/apache/arrow-datafusion/blob/292eb954fc0bad3a1febc597233ba26cb60bda3e/datafusion/physical-expr/src/aggregate/variance.rs#L224-#L241
/// <https://github.com/apache/arrow-datafusion/blob/292eb954fc0bad3a1febc597233ba26cb60bda3e/datafusion/physical-expr/src/aggregate/variance.rs#L224-#L241>
#[range_fn(
name = "StdvarOverTime",
ret = "Float64Array",
@@ -153,7 +153,7 @@ pub fn stdvar_over_time(_: &TimestampMillisecondArray, values: &Float64Array) ->
}
/// the population standard deviation of the values in the specified interval.
/// Prometheus's implementation: https://github.com/prometheus/prometheus/blob/f55ab2217984770aa1eecd0f2d5f54580029b1c0/promql/functions.go#L556-L569
/// Prometheus's implementation: <https://github.com/prometheus/prometheus/blob/f55ab2217984770aa1eecd0f2d5f54580029b1c0/promql/functions.go#L556-L569>
#[range_fn(
name = "StddevOverTime",
ret = "Float64Array",


@@ -48,7 +48,7 @@ pub type Rate = ExtrapolatedRate<true, true>;
pub type Increase = ExtrapolatedRate<true, false>;
/// Part of the `extrapolatedRate` in Promql,
/// from https://github.com/prometheus/prometheus/blob/6bdecf377cea8e856509914f35234e948c4fcb80/promql/functions.go#L66
/// from <https://github.com/prometheus/prometheus/blob/6bdecf377cea8e856509914f35234e948c4fcb80/promql/functions.go#L66>
#[derive(Debug)]
pub struct ExtrapolatedRate<const IS_COUNTER: bool, const IS_RATE: bool> {
/// Range duration in millisecond
@@ -56,7 +56,7 @@ pub struct ExtrapolatedRate<const IS_COUNTER: bool, const IS_RATE: bool> {
}
impl<const IS_COUNTER: bool, const IS_RATE: bool> ExtrapolatedRate<IS_COUNTER, IS_RATE> {
/// Constructor. Other public usage should use [`scalar_udf`] instead.
/// Constructor. Other public usage should use [scalar_udf()](ExtrapolatedRate::scalar_udf()) instead.
fn new(range_length: i64) -> Self {
Self { range_length }
}


@@ -152,7 +152,7 @@ fn calc_trend_value(i: usize, tf: f64, s0: f64, s1: f64, b: f64) -> f64 {
x + y
}
/// Refer to https://github.com/prometheus/prometheus/blob/main/promql/functions.go#L299
/// Refer to <https://github.com/prometheus/prometheus/blob/main/promql/functions.go#L299>
fn holt_winter_impl(values: &[f64], sf: f64, tf: f64) -> Option<f64> {
if sf.is_nan() || tf.is_nan() || values.is_empty() {
return Some(f64::NAN);

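`holt_winter_impl` above is a port of Prometheus' double exponential smoothing: `sf` smooths the level, `tf` smooths the trend, and the final smoothed level is returned. Here is a textbook-style sketch of the same idea; the real port may differ in details such as how the trend term is seeded:

```rust
/// Holt's double exponential smoothing: sf smooths the level, tf smooths the trend.
fn holt_winters_sketch(values: &[f64], sf: f64, tf: f64) -> Option<f64> {
    if sf.is_nan() || tf.is_nan() || values.is_empty() {
        return Some(f64::NAN);
    }
    if values.len() < 2 {
        return None;
    }
    let mut level = values[0];
    let mut trend = values[1] - values[0];
    for &v in &values[1..] {
        let prev_level = level;
        level = sf * v + (1.0 - sf) * (level + trend);
        trend = tf * (level - prev_level) + (1.0 - tf) * trend;
    }
    Some(level)
}

fn main() {
    // A perfectly linear series is tracked exactly: the result equals the last point.
    let values: Vec<f64> = (0..10).map(|i| i as f64).collect();
    assert!((holt_winters_sketch(&values, 0.5, 0.5).unwrap() - 9.0).abs() < 1e-9);
}
```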

@@ -28,7 +28,7 @@ use crate::functions::extract_array;
use crate::range_array::RangeArray;
/// The `funcIdelta` in Promql,
/// from https://github.com/prometheus/prometheus/blob/6bdecf377cea8e856509914f35234e948c4fcb80/promql/functions.go#L235
/// from <https://github.com/prometheus/prometheus/blob/6bdecf377cea8e856509914f35234e948c4fcb80/promql/functions.go#L235>
#[derive(Debug)]
pub struct IDelta<const IS_RATE: bool> {}


@@ -128,7 +128,7 @@ impl QuantileOverTime {
}
}
/// Refer to https://github.com/prometheus/prometheus/blob/6e2905a4d4ff9b47b1f6d201333f5bd53633f921/promql/quantile.go#L357-L386
/// Refer to <https://github.com/prometheus/prometheus/blob/6e2905a4d4ff9b47b1f6d201333f5bd53633f921/promql/quantile.go#L357-L386>
fn quantile_impl(values: &[f64], quantile: f64) -> Option<f64> {
if quantile.is_nan() || values.is_empty() {
return Some(f64::NAN);

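`quantile_impl` above mirrors Prometheus' quantile calculation: sort the samples and linearly interpolate between the two order statistics surrounding rank `q * (n - 1)`. A standalone sketch of that algorithm, keeping the NaN/empty guard shown in the diff:

```rust
/// Linear-interpolation quantile over an unsorted sample set.
fn quantile_impl(values: &[f64], quantile: f64) -> Option<f64> {
    if quantile.is_nan() || values.is_empty() {
        return Some(f64::NAN);
    }
    if quantile < 0.0 {
        return Some(f64::NEG_INFINITY);
    }
    if quantile > 1.0 {
        return Some(f64::INFINITY);
    }
    let mut sorted = values.to_vec();
    sorted.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
    let n = sorted.len() as f64;
    let rank = quantile * (n - 1.0);
    let lower = rank.floor() as usize;
    let upper = (lower + 1).min(sorted.len() - 1);
    let weight = rank - rank.floor();
    Some(sorted[lower] * (1.0 - weight) + sorted[upper] * weight)
}

fn main() {
    assert_eq!(quantile_impl(&[3.0, 1.0, 2.0, 4.0], 0.5), Some(2.5));
}
```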

@@ -20,7 +20,7 @@ use common_recordbatch::SendableRecordBatchStream;
use crate::error::Result;
use crate::query_engine::QueryEngineContext;
/// Executor to run [ExecutionPlan].
/// Executor to run [PhysicalPlan].
pub trait QueryExecutor {
fn execute_stream(
&self,


@@ -495,8 +495,8 @@ struct RangeSelectStream {
/// key: time series's hash value
/// value: time series's state on different align_ts
series_map: HashMap<u64, SeriesState>,
/// key: (hash of by rows, align_ts)
/// value: [row_ids]
/// key: `(hash of by rows, align_ts)`
/// value: `[row_ids]`
/// It is used to record the data that needs to be aggregated in each time slot during the data update process
modify_map: HashMap<(u64, Millisecond), Vec<u32>>,
/// The number of rows of the final output


@@ -223,7 +223,7 @@ fn all_to_f64(col: DFColValue, vm: &VirtualMachine) -> PyResult<DFColValue> {
}
/// use to bind to Data Fusion's UDF function
/// P.S: seems due to proc macro issues, can't just use #[pyfunction] in here
/// P.S: seems due to proc macro issues, can't just use `#[pyfunction]` in here
macro_rules! bind_call_unary_math_function {
($DF_FUNC: ident, $vm: ident $(,$ARG: ident)*) => {
fn inner_fn($($ARG: PyObjectRef,)* vm: &VirtualMachine) -> PyResult<PyObjectRef> {


@@ -69,11 +69,11 @@ pub struct GrpcServer {
serve_state: Mutex<Option<Receiver<Result<()>>>>,
// handlers
/// Handler for [GreptimeDatabase] service.
/// Handler for [DatabaseService] service.
database_handler: Option<GreptimeRequestHandler>,
/// Handler for Prometheus-compatible PromQL queries ([PrometheusGateway]). Only present for frontend server.
prometheus_handler: Option<PrometheusHandlerRef>,
/// Handler for [FlightService].
/// Handler for [FlightService](arrow_flight::flight_service_server::FlightService).
flight_handler: Option<FlightCraftRef>,
/// Handler for [RegionServer].
region_server_handler: Option<RegionServerRequestHandler>,


@@ -25,7 +25,7 @@ use crate::query_handler::OpentsdbProtocolHandlerRef;
use crate::shutdown::Shutdown;
/// Per-connection handler. Reads requests from `connection` and applies the OpenTSDB metric to
/// [OpentsdbLineProtocolHandler].
/// [OpentsdbProtocolHandlerRef].
pub(crate) struct Handler<S: AsyncWrite + AsyncRead + Unpin> {
query_handler: OpentsdbProtocolHandlerRef,


@@ -26,7 +26,7 @@ const GREPTIME_VALUE: &str = "greptime_value";
/// Normalize otlp instrumentation, metric and attribute names
///
/// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#instrument-name-syntax
/// <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#instrument-name-syntax>
/// - since the name are case-insensitive, we transform them to lowercase for
/// better sql usability
/// - replace `.` and `-` with `_`
@@ -37,7 +37,7 @@ fn normalize_otlp_name(name: &str) -> String {
/// Convert OpenTelemetry metrics to GreptimeDB insert requests
///
/// See
/// https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto#L162
/// <https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto#L162>
/// for data structure of OTLP metrics.
///
/// Returns `InsertRequests` and total number of rows to ingest

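The normalization described above boils down to lowercasing the OTLP name and replacing `.` and `-` with `_` so it sits comfortably in SQL. A minimal sketch of that rule; the real function may cover more cases:

```rust
/// Lowercase the instrument/attribute name and replace '.' / '-' with '_'.
fn normalize_otlp_name(name: &str) -> String {
    name.to_lowercase().replace(|c: char| c == '.' || c == '-', "_")
}

fn main() {
    assert_eq!(normalize_otlp_name("http.Server-Duration"), "http_server_duration");
}
```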

@@ -189,7 +189,7 @@ impl PartialOrd for TimeSeriesId {
}
/// Collect each row's timeseries id
/// This processing is ugly, hope https://github.com/GreptimeTeam/greptimedb/issues/336 making some progress in future.
/// This processing is ugly, hope <https://github.com/GreptimeTeam/greptimedb/issues/336> making some progress in future.
fn collect_timeseries_ids(table_name: &str, recordbatch: &RecordBatch) -> Vec<TimeSeriesId> {
let row_count = recordbatch.num_rows();
let mut timeseries_ids = Vec::with_capacity(row_count);


@@ -29,9 +29,9 @@ const EXPLAIN: &str = "EXPLAIN";
use sqlparser::parser::Parser;
/// TQL extension parser, including:
/// - TQL EVAL <query>
/// - TQL EXPLAIN <query>
/// - TQL ANALYZE <query>
/// - `TQL EVAL <query>`
/// - `TQL EXPLAIN <query>`
/// - `TQL ANALYZE <query>`
impl<'a> ParserContext<'a> {
pub(crate) fn parse_tql(&mut self) -> Result<Statement> {
let _ = self.parser.next_token();

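The TQL extension parser above dispatches on the keyword that follows `TQL`. Below is a purely illustrative sketch of that dispatch over plain strings, not the real sqlparser-based implementation, with the query payload treated as an opaque string:

```rust
#[derive(Debug, PartialEq)]
enum Tql {
    Eval(String),
    Explain(String),
    Analyze(String),
}

/// Pick the TQL subcommand (EVAL / EXPLAIN / ANALYZE) and hand back the rest as the query.
fn parse_tql(input: &str) -> Result<Tql, String> {
    let rest = input
        .trim()
        .strip_prefix("TQL")
        .ok_or("expected TQL")?
        .trim_start();
    let (keyword, query) = rest.split_once(' ').ok_or("missing query")?;
    let query = query.trim().to_string();
    match keyword.to_uppercase().as_str() {
        "EVAL" => Ok(Tql::Eval(query)),
        "EXPLAIN" => Ok(Tql::Explain(query)),
        "ANALYZE" => Ok(Tql::Analyze(query)),
        other => Err(format!("unknown TQL subcommand: {other}")),
    }
}

fn main() {
    assert_eq!(
        parse_tql("TQL EVAL rate(http_requests[5m])"),
        Ok(Tql::Eval("rate(http_requests[5m])".to_string()))
    );
}
```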

@@ -20,7 +20,7 @@ use crate::parser::ParserContext;
use crate::statements::statement::Statement;
use crate::statements::truncate::TruncateTable;
/// TRUNCATE [TABLE] table_name;
/// `TRUNCATE [TABLE] table_name;`
impl<'a> ParserContext<'a> {
pub(crate) fn parse_truncate(&mut self) -> Result<Statement> {
let _ = self.parser.next_token();


@@ -106,7 +106,7 @@ impl SizeBasedStrategy {
/// Returns whether to trigger an engine level flush.
///
/// Inspired by RocksDB's WriteBufferManager.
/// https://github.com/facebook/rocksdb/blob/main/include/rocksdb/write_buffer_manager.h#L94
/// <https://github.com/facebook/rocksdb/blob/main/include/rocksdb/write_buffer_manager.h#L94>
fn should_flush_engine(&self) -> bool {
// We only check global limit when it is Some.
let Some(global_write_buffer_size) = self.global_write_buffer_size else {

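The check above follows RocksDB's WriteBufferManager idea: trigger an engine-level flush once mutable memtables exceed a fraction of the global write buffer limit, or once total usage reaches the limit itself. A hypothetical sketch with illustrative field names and the 7/8 ratio that RocksDB's `ShouldFlush` uses; only the `global_write_buffer_size` guard is taken from the diff:

```rust
struct SizeBasedStrategy {
    /// Global write buffer limit; None disables engine-level flush triggering.
    global_write_buffer_size: Option<usize>,
    /// Threshold for mutable memtables, e.g. global_write_buffer_size * 7 / 8.
    mutable_limitation: usize,
}

impl SizeBasedStrategy {
    fn should_flush_engine(&self, mutable_bytes: usize, total_bytes: usize) -> bool {
        // We only check the global limit when it is Some.
        let Some(global) = self.global_write_buffer_size else {
            return false;
        };
        if mutable_bytes > self.mutable_limitation {
            return true;
        }
        total_bytes >= global
    }
}

fn main() {
    let strategy = SizeBasedStrategy {
        global_write_buffer_size: Some(1024),
        mutable_limitation: 1024 * 7 / 8,
    };
    assert!(strategy.should_flush_engine(900, 900));
    assert!(!strategy.should_flush_engine(100, 200));
}
```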

@@ -44,7 +44,7 @@ pub type MemtableId = u32;
#[derive(Debug, Default)]
pub struct MemtableStats {
/// The estimated bytes allocated by this memtable from heap. Result
/// of this method may be larger than the estimated based on [`num_rows`] because
/// of this method may be larger than the estimated based on `num_rows` because
/// of the implementor's pre-alloc behavior.
pub estimated_bytes: usize,
/// The max timestamp that this memtable contains.
@@ -267,7 +267,7 @@ impl Drop for AllocTracker {
}
}
/// Default memtable builder that builds [BTreeMemtable].
/// Default memtable builder that builds `BTreeMemtable`.
#[derive(Debug, Default)]
pub struct DefaultMemtableBuilder {
memtable_id: AtomicU32,


@@ -21,7 +21,7 @@ use crate::memtable::KeyValues;
use crate::metrics::MEMTABLE_WRITE_ELAPSED;
use crate::write_batch::{Mutation, Payload};
/// Wraps logic of inserting key/values in [WriteBatch] to [Memtable].
/// Wraps logic of inserting key/values in [WriteBatch](crate::write_batch::WriteBatch) to [Memtable](crate::memtable::Memtable).
pub struct Inserter {
/// Sequence of the batch to be inserted.
sequence: SequenceNumber,
@@ -39,7 +39,7 @@ impl Inserter {
/// Insert write batch payload into memtable.
///
/// Won't do schema validation if not configured. Caller (mostly the [`RegionWriter`]) should ensure the
/// Won't do schema validation if not configured. Caller (mostly the `RegionWriter`) should ensure the
/// schemas of `memtable` are consistent with `payload`'s.
pub fn insert_memtable(&mut self, payload: &Payload, memtable: &MemtableRef) -> Result<()> {
let _timer = common_telemetry::timer!(MEMTABLE_WRITE_ELAPSED);


@@ -140,7 +140,7 @@ pub trait BatchOp {
/// being masked out.
fn filter(&self, batch: &Batch, filter: &BooleanVector) -> Result<Batch>;
/// Unselect deleted rows according to the [`OpType`](store_api::storage::OpType).
/// Unselect deleted rows according to the [`OpType`](api::v1::OpType).
///
/// # Panics
/// Panics if


@@ -6,6 +6,7 @@ license.workspace = true
[dependencies]
api = { workspace = true }
aquamarine.workspace = true
async-trait.workspace = true
bytes = "1.1"
common-base = { workspace = true }


@@ -50,7 +50,7 @@ pub struct ColumnMetadata {
}
impl ColumnMetadata {
/// Construct `Self` from protobuf struct [ColumnDef]
/// Construct `Self` from protobuf struct [RegionColumnDef]
pub fn try_from_column_def(column_def: RegionColumnDef) -> Result<Self> {
let column_id = column_def.column_id;