chore: upgrade toolchain to nightly-2024-08-07 (#4549)

* chore: upgrade toolchain to `nightly-2024-08-07`
* chore(ci): upgrade toolchain
* fix: fix unit test
.github/workflows/apidoc.yml

@@ -13,7 +13,7 @@ on:
 name: Build API docs

 env:
-  RUST_TOOLCHAIN: nightly-2024-04-20
+  RUST_TOOLCHAIN: nightly-2024-08-07

 jobs:
   apidoc:
.github/workflows/develop.yml

@@ -30,7 +30,7 @@ concurrency:
   cancel-in-progress: true

 env:
-  RUST_TOOLCHAIN: nightly-2024-04-20
+  RUST_TOOLCHAIN: nightly-2024-08-07

 jobs:
   check-typos-and-docs:
.github/workflows/nightly-ci.yml

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 env:
-  RUST_TOOLCHAIN: nightly-2024-04-20
+  RUST_TOOLCHAIN: nightly-2024-08-07

 permissions:
   issues: write
.github/workflows/release.yml

@@ -82,7 +82,7 @@ on:
 # Use env variables to control all the release process.
 env:
   # The arguments of building greptime.
-  RUST_TOOLCHAIN: nightly-2024-04-20
+  RUST_TOOLCHAIN: nightly-2024-08-07
   CARGO_PROFILE: nightly

 # Controls whether to run tests, include unit-test, integration-test and sqlness.
Cargo.lock

@@ -4568,9 +4568,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"

 [[package]]
 name = "human-panic"
-version = "1.2.3"
+version = "2.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c4f016c89920bbb30951a8405ecacbb4540db5524313b9445736e7e1855cf370"
+checksum = "1c5a08ed290eac04006e21e63d32e90086b6182c7cd0452d10f4264def1fec9a"
 dependencies = [
  "anstream",
  "anstyle",
@@ -77,6 +77,7 @@ clippy.readonly_write_lock = "allow"
 rust.unknown_lints = "deny"
 # Remove this after https://github.com/PyO3/pyo3/issues/4094
 rust.non_local_definitions = "allow"
+rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }

 [workspace.dependencies]
 # We turn off default-features for some dependencies here so the workspaces which inherit them can
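Note: the newly pinned toolchain enables cargo's check-cfg validation by default (landed around Rust 1.80), so cfg names the build does not declare now trigger the `unexpected_cfgs` lint. The `check-cfg = ['cfg(tokio_unstable)']` entry above declares `tokio_unstable` as an expected custom cfg. A hedged sketch of the equivalent build-script declaration (illustrative only; this repo uses the `[lints.rust]` table instead):

    // build.rs -- declare a custom cfg so check-cfg accepts it.
    fn main() {
        // Equivalent in effect to the Cargo.toml `check-cfg` entry above.
        println!("cargo::rustc-check-cfg=cfg(tokio_unstable)");
    }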
@@ -1,2 +1,3 @@
 [toolchain]
-channel = "nightly-2024-04-20"
+channel = "nightly-2024-08-07"
@@ -51,7 +51,7 @@ file-engine.workspace = true
 flow.workspace = true
 frontend = { workspace = true, default-features = false }
 futures.workspace = true
-human-panic = "1.2.2"
+human-panic = "2.0"
 lazy_static.workspace = true
 meta-client.workspace = true
 meta-srv.workspace = true
@@ -139,13 +139,10 @@ async fn start(cli: Command) -> Result<()> {
 }

 fn setup_human_panic() {
-    let metadata = human_panic::Metadata {
-        version: env!("CARGO_PKG_VERSION").into(),
-        name: "GreptimeDB".into(),
-        authors: Default::default(),
-        homepage: "https://github.com/GreptimeTeam/greptimedb/discussions".into(),
-    };
-    human_panic::setup_panic!(metadata);
+    human_panic::setup_panic!(
+        human_panic::Metadata::new("GreptimeDB", env!("CARGO_PKG_VERSION"))
+            .homepage("https://github.com/GreptimeTeam/greptimedb/discussions")
+    );

     common_telemetry::set_panic_hook();
 }
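human-panic 2.x drops the field-by-field `Metadata` struct literal in favor of a `Metadata::new(name, version)` constructor with builder methods, as the hunk above shows. A minimal standalone sketch of the new API (assuming human-panic 2.0; the URL here is hypothetical, and the crate also offers builder methods such as `.authors(...)` and `.support(...)`):

    use human_panic::{setup_panic, Metadata};

    fn main() {
        // Builder-style metadata replaces the 1.x struct literal.
        setup_panic!(
            Metadata::new("my-app", env!("CARGO_PKG_VERSION"))
                .homepage("https://example.com/support") // hypothetical URL
        );
    }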
@@ -32,7 +32,7 @@ pub struct FunctionContext {

 impl FunctionContext {
     /// Create a mock [`FunctionContext`] for test.
-    #[cfg(any(test, feature = "testing"))]
+    #[cfg(test)]
     pub fn mock() -> Self {
         Self {
             query_ctx: QueryContextBuilder::default().build().into(),
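This `mock()` helper was previously compiled for both unit tests and a `testing` cargo feature; narrowing to `#[cfg(test)]` ties it to this crate's own tests. One plausible motivation, given the `unexpected_cfgs` lint enabled above, is that an undeclared `testing` feature would now warn under check-cfg; that reading is an assumption, not stated in the commit. Sketch of the difference (helper name hypothetical):

    // Before: available to unit tests AND to dependents enabling a
    // `testing` cargo feature:
    //   #[cfg(any(test, feature = "testing"))]
    // After: available to this crate's unit tests only:
    #[cfg(test)]
    fn mock_context() -> &'static str {
        "mock" // hypothetical stand-in for a mocked value
    }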
@@ -75,7 +75,7 @@ where
         // to keep the not_greater length == floor+1
         // so to ensure the peek of the not_greater is array[floor]
         // and the peek of the greater is array[floor+1]
-        let p = if let Some(p) = self.p { p } else { 0.0_f64 };
+        let p = self.p.unwrap_or(0.0_f64);
         let floor = (((self.n - 1) as f64) * p / (100_f64)).floor();
         if value <= *self.not_greater.peek().unwrap() {
             self.not_greater.push(value);
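The `if let Some(p) ... else` expression is exactly `Option::unwrap_or`, and clippy's manual-unwrap lints flag the long form on newer toolchains. A quick equivalence check:

    fn main() {
        let p: Option<f64> = None;
        let long = if let Some(p) = p { p } else { 0.0_f64 };
        let short = p.unwrap_or(0.0_f64);
        assert_eq!(long, short); // both are 0.0
    }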
@@ -245,7 +245,7 @@ mod test {
         ];
         scipy_stats_norm_pdf.update_batch(&v).unwrap();
         assert_eq!(
-            Value::from(0.17843340219081558),
+            Value::from(0.17843340219081552),
             scipy_stats_norm_pdf.evaluate().unwrap()
         );
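The expected value changes only in the last digit (…558 to …552), i.e. by a few ulps; this is presumably the "fix: fix unit test" part of the commit message, caused by codegen or float-math differences between the two nightlies. Exact float equality is brittle across toolchains; a tolerance-based assertion is a common alternative (a sketch, not what the repo does):

    fn main() {
        let expected = 0.17843340219081552_f64;
        let actual = 0.17843340219081558_f64; // value from the older toolchain
        // Compare within a small absolute tolerance instead of bit-for-bit.
        assert!((expected - actual).abs() < 1e-12);
    }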
@@ -28,7 +28,7 @@ pub struct FunctionState {

 impl FunctionState {
     /// Create a mock [`FunctionState`] for test.
-    #[cfg(any(test, feature = "testing"))]
+    #[cfg(test)]
     pub fn mock() -> Self {
         use std::sync::Arc;
@@ -76,6 +76,7 @@ pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
 /// - `&ProcedureServiceHandlerRef` or `&TableMutationHandlerRef` or `FlowServiceHandlerRef` as the first argument,
 /// - `&QueryContextRef` as the second argument, and
 /// - `&[ValueRef<'_>]` as the third argument which is SQL function input values in each row.
+///
 /// Return type must be `common_query::error::Result<Value>`.
 ///
 /// # Example see `common/function/src/system/procedure_state.rs`.
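Several hunks in this commit insert a bare `///` between a doc list and the prose that follows it. Without the blank doc line, Markdown treats the next paragraph as a lazy continuation of the last list item; the newer clippy flags this (the `doc_lazy_continuation` lint, if memory serves). Pattern sketch:

    /// Arguments:
    /// - `a`: first operand
    /// - `b`: second operand
    ///
    /// Returns the sum. (Without the blank `///` above, this line would
    /// render as part of the `b` list item.)
    fn add(a: i64, b: i64) -> i64 {
        a + b
    }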
@@ -12,8 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#![feature(lazy_cell)]
-
 use std::path::{Path, PathBuf};
 use std::process::Command;
 use std::sync::LazyLock;
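`std::sync::LazyLock` was stabilized in the Rust 1.80 cycle, so the `#![feature(lazy_cell)]` gate is dropped here and in the other crates below; on the new nightly the gate for an already-stable feature would itself be rejected. Minimal usage sketch:

    use std::sync::LazyLock;

    // Initialized thread-safely on first access; no once_cell crate needed.
    static GREETING: LazyLock<String> = LazyLock::new(|| "hello".to_string());

    fn main() {
        println!("{}", *GREETING);
    }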
@@ -17,7 +17,6 @@
 //! It also contains definition of expression, adapter and plan, and internal state management.

 #![feature(let_chains)]
-#![feature(duration_abs_diff)]
 #![allow(dead_code)]
 #![warn(clippy::missing_docs_in_private_items)]
 #![warn(clippy::too_many_lines)]
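`Duration::abs_diff` was likewise stabilized (around Rust 1.81, already in this nightly), making the `duration_abs_diff` gate unnecessary. Sketch:

    use std::time::Duration;

    fn main() {
        let a = Duration::from_secs(5);
        let b = Duration::from_secs(3);
        // Absolute difference regardless of operand order.
        assert_eq!(a.abs_diff(b), Duration::from_secs(2));
        assert_eq!(b.abs_diff(a), Duration::from_secs(2));
    }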
@@ -40,6 +40,7 @@ pub(crate) const ESTIMATED_META_SIZE: usize = 256;
 /// - If the entry is able to fit into a Kafka record, it's converted into a Full record.
 ///
 /// - If the entry is too large to fit into a Kafka record, it's converted into a collection of records.
+///
 /// Those records must contain exactly one First record and one Last record, and potentially several
 /// Middle records. There may be no Middle record.
 #[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
@@ -23,6 +23,7 @@ use store_api::logstore::entry::{Entry, NaiveEntry};
 use store_api::logstore::provider::Provider;
 use store_api::storage::RegionId;

+#[allow(renamed_and_removed_lints)]
 pub mod protos {
     include!(concat!(env!("OUT_DIR"), concat!("/", "protos/", "mod.rs")));
 }
@@ -15,7 +15,6 @@
 #![feature(async_closure)]
 #![feature(result_flattening)]
 #![feature(assert_matches)]
-#![feature(option_take_if)]
 #![feature(extract_if)]

 pub mod bootstrap;
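`Option::take_if` was stabilized in Rust 1.80, so its gate goes away too. Sketch of the method's behavior:

    fn main() {
        let mut slot = Some(41);
        // Takes the value out only if the predicate holds; otherwise leaves it.
        assert_eq!(slot.take_if(|v| *v > 40), Some(41));
        assert_eq!(slot, None);
    }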
@@ -27,6 +27,7 @@ pub trait WeightedChoose<Item>: Send + Sync {
     /// Note:
     /// 1. make sure weight_array is not empty.
     /// 2. the total weight is greater than 0.
+    ///
     /// Otherwise an error will be returned.
     fn set_weight_array(&mut self, weight_array: Vec<WeightedItem<Item>>) -> Result<()>;
@@ -960,6 +960,7 @@ pub fn build_rows(start: usize, end: usize) -> Vec<Row> {
 /// - `key`: A string key that is common across all rows.
 /// - `timestamps`: Array of timestamp values.
 /// - `fields`: Array of tuples where each tuple contains two optional i64 values, representing two optional float fields.
+///
 /// Returns a vector of `Row` each containing the key, two optional float fields, and a timestamp.
 pub fn build_rows_with_fields(
     key: &str,
@@ -159,9 +159,8 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
         let create_res = self.inner.create_dir(path, args).await;

         timer.observe_duration();
-        create_res.map_err(|e| {
+        create_res.inspect_err(|e| {
             increment_errors_total(Operation::CreateDir, e.kind());
-            e
         })
     }
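The bulk of this commit's source changes swap `map_err(|e| { side_effect; e })` for `inspect_err(|e| { side_effect })`, here and in every hunk below. `Result::inspect_err` (stable since Rust 1.76) runs a closure on the error by reference and passes the `Result` through unchanged, so the identity-returning `e` line disappears. Generic sketch of the two equivalent forms:

    fn observe(r: Result<u32, String>) -> Result<u32, String> {
        // Old style: the closure must hand the error back.
        //   r.map_err(|e| { eprintln!("error: {e}"); e })
        // New style: side effect only; the Result passes through untouched.
        r.inspect_err(|e| eprintln!("error: {e}"))
    }

    fn main() {
        assert_eq!(observe(Err("boom".into())), Err("boom".to_string()));
    }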
@@ -175,9 +174,8 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
             .with_label_values(&[&self.scheme, Operation::Read.into_static(), path_label])
             .start_timer();

-        let (rp, r) = self.inner.read(path, args).await.map_err(|e| {
+        let (rp, r) = self.inner.read(path, args).await.inspect_err(|e| {
             increment_errors_total(Operation::Read, e.kind());
-            e
         })?;

         Ok((
@@ -205,9 +203,8 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
             .with_label_values(&[&self.scheme, Operation::Write.into_static(), path_label])
             .start_timer();

-        let (rp, r) = self.inner.write(path, args).await.map_err(|e| {
+        let (rp, r) = self.inner.write(path, args).await.inspect_err(|e| {
             increment_errors_total(Operation::Write, e.kind());
-            e
         })?;

         Ok((
@@ -236,9 +233,8 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {

         let stat_res = self.inner.stat(path, args).await;
         timer.observe_duration();
-        stat_res.map_err(|e| {
+        stat_res.inspect_err(|e| {
             increment_errors_total(Operation::Stat, e.kind());
-            e
         })
     }
@@ -254,9 +250,8 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {

         let delete_res = self.inner.delete(path, args).await;
         timer.observe_duration();
-        delete_res.map_err(|e| {
+        delete_res.inspect_err(|e| {
             increment_errors_total(Operation::Delete, e.kind());
-            e
         })
     }
@@ -273,9 +268,8 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
         let list_res = self.inner.list(path, args).await;

         timer.observe_duration();
-        list_res.map_err(|e| {
+        list_res.inspect_err(|e| {
             increment_errors_total(Operation::List, e.kind());
-            e
         })
     }
@@ -290,9 +284,8 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
         let result = self.inner.batch(args).await;

         timer.observe_duration();
-        result.map_err(|e| {
+        result.inspect_err(|e| {
             increment_errors_total(Operation::Batch, e.kind());
-            e
         })
     }
@@ -308,9 +301,8 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
         let result = self.inner.presign(path, args).await;
         timer.observe_duration();

-        result.map_err(|e| {
+        result.inspect_err(|e| {
             increment_errors_total(Operation::Presign, e.kind());
-            e
         })
     }
@@ -335,9 +327,8 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {

         timer.observe_duration();

-        result.map_err(|e| {
+        result.inspect_err(|e| {
             increment_errors_total(Operation::BlockingCreateDir, e.kind());
-            e
         })
     }
@@ -376,9 +367,8 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
                 ),
             )
         })
-        .map_err(|e| {
+        .inspect_err(|e| {
             increment_errors_total(Operation::BlockingRead, e.kind());
-            e
         })
     }
@@ -417,9 +407,8 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
                 ),
             )
         })
-        .map_err(|e| {
+        .inspect_err(|e| {
             increment_errors_total(Operation::BlockingWrite, e.kind());
-            e
         })
     }
@@ -442,9 +431,8 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
             .start_timer();
         let result = self.inner.blocking_stat(path, args);
         timer.observe_duration();
-        result.map_err(|e| {
+        result.inspect_err(|e| {
             increment_errors_total(Operation::BlockingStat, e.kind());
-            e
         })
     }
@@ -468,9 +456,8 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
         let result = self.inner.blocking_delete(path, args);
         timer.observe_duration();

-        result.map_err(|e| {
+        result.inspect_err(|e| {
             increment_errors_total(Operation::BlockingDelete, e.kind());
-            e
         })
     }
@@ -494,9 +481,8 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
         let result = self.inner.blocking_list(path, args);
         timer.observe_duration();

-        result.map_err(|e| {
+        result.inspect_err(|e| {
             increment_errors_total(Operation::BlockingList, e.kind());
-            e
         })
     }
 }
@@ -535,18 +521,16 @@ impl<R> PrometheusMetricWrapper<R> {

 impl<R: oio::Read> oio::Read for PrometheusMetricWrapper<R> {
     async fn read(&mut self) -> Result<Buffer> {
-        self.inner.read().await.map_err(|err| {
+        self.inner.read().await.inspect_err(|err| {
             increment_errors_total(self.op, err.kind());
-            err
         })
     }
 }

 impl<R: oio::BlockingRead> oio::BlockingRead for PrometheusMetricWrapper<R> {
     fn read(&mut self) -> opendal::Result<Buffer> {
-        self.inner.read().map_err(|err| {
+        self.inner.read().inspect_err(|err| {
             increment_errors_total(self.op, err.kind());
-            err
         })
     }
 }
@@ -567,16 +551,14 @@ impl<R: oio::Write> oio::Write for PrometheusMetricWrapper<R> {
     }

     async fn close(&mut self) -> Result<()> {
-        self.inner.close().await.map_err(|err| {
+        self.inner.close().await.inspect_err(|err| {
             increment_errors_total(self.op, err.kind());
-            err
         })
     }

     async fn abort(&mut self) -> Result<()> {
-        self.inner.close().await.map_err(|err| {
+        self.inner.close().await.inspect_err(|err| {
             increment_errors_total(self.op, err.kind());
-            err
         })
     }
 }
@@ -589,16 +571,14 @@ impl<R: oio::BlockingWrite> oio::BlockingWrite for PrometheusMetricWrapper<R> {
             .map(|_| {
                 self.bytes += bytes as u64;
             })
-            .map_err(|err| {
+            .inspect_err(|err| {
                 increment_errors_total(self.op, err.kind());
-                err
             })
     }

     fn close(&mut self) -> Result<()> {
-        self.inner.close().map_err(|err| {
+        self.inner.close().inspect_err(|err| {
             increment_errors_total(self.op, err.kind());
-            err
         })
     }
 }
@@ -36,6 +36,7 @@ use crate::range_array::RangeArray;
 /// 2) "Holt's linear method" (a.k.a. "double exponential smoothing"): `level` and `trend` components are used to make forecasts.
 /// This method is applied for time-series data that exhibits trend but not seasonality.
 /// 3) "Holt-Winter's method" (a.k.a. "triple exponential smoothing"): `level`, `trend`, and `seasonality` are used to make forecasts.
+///
 /// This method is applied for time-series data that exhibits both trend and seasonality.
 ///
 /// In order to keep the parity with the Prometheus functions we had to follow the same naming ("HoltWinters"), however
@@ -666,6 +666,7 @@ impl PromPlanner {
     /// Name rule:
     /// - if `name` is some, then the matchers MUST NOT contain `__name__` matcher.
     /// - if `name` is none, then the matchers MAY contain NONE OR MULTIPLE `__name__` matchers.
+    #[allow(clippy::mutable_key_type)]
     fn preprocess_label_matchers(
         &mut self,
         label_matchers: &Matchers,
@@ -70,7 +70,7 @@ impl QueryEngineContext {
     }

     /// Mock an engine context for unit tests.
-    #[cfg(any(test, feature = "test"))]
+    #[cfg(test)]
     pub fn mock() -> Self {
         use common_base::Plugins;
         use session::context::QueryContext;
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use std::any::Any;
 use std::collections::HashMap;
 use std::sync::Arc;
@@ -31,8 +32,9 @@ use tokio::runtime::Runtime;
 static SCRIPT_ENGINE: Lazy<PyEngine> = Lazy::new(sample_script_engine);
 static LOCAL_RUNTIME: OnceCell<tokio::runtime::Runtime> = OnceCell::new();
 fn get_local_runtime() -> std::thread::Result<&'static Runtime> {
-    let rt = LOCAL_RUNTIME
-        .get_or_try_init(|| tokio::runtime::Runtime::new().map_err(|e| Box::new(e) as _))?;
+    let rt = LOCAL_RUNTIME.get_or_try_init(|| {
+        tokio::runtime::Runtime::new().map_err(|e| Box::new(e) as Box<dyn Any + Send + 'static>)
+    })?;
     Ok(rt)
 }
 /// a terrible hack to call async from sync by:
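`std::thread::Result<T>` is an alias for `Result<T, Box<dyn Any + Send + 'static>>`, so the closure's error must coerce to that boxed trait object; the old `as _` cast relied on inference that the newer toolchain apparently no longer accepts here, hence the spelled-out type. Sketch of the coercion:

    use std::any::Any;
    use std::io;

    // std::thread::Result<T> = Result<T, Box<dyn Any + Send + 'static>>
    fn boxed(e: io::Error) -> Box<dyn Any + Send + 'static> {
        Box::new(e) // explicit target type makes the unsize coercion unambiguous
    }

    fn main() {
        let b = boxed(io::Error::new(io::ErrorKind::Other, "boom"));
        assert!(b.downcast_ref::<io::Error>().is_some());
    }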
@@ -110,9 +110,8 @@ impl GreptimeRequestHandler {
                 .spawn(result_future)
                 .await
                 .context(JoinTaskSnafu)
-                .map_err(|e| {
+                .inspect_err(|e| {
                     timer.record(e.status_code());
-                    e
                 })?
             }
             None => result_future.await,
@@ -160,11 +159,10 @@ pub(crate) async fn auth(
             name: "Token AuthScheme".to_string(),
         }),
     }
-    .map_err(|e| {
+    .inspect_err(|e| {
         METRIC_AUTH_FAILURE
             .with_label_values(&[e.status_code().as_ref()])
             .inc();
-        e
     })
 }
@@ -42,11 +42,10 @@ pub(crate) static JEMALLOC_COLLECTOR: Lazy<Option<JemallocCollector>> = Lazy::ne
             e
         })
         .ok();
-    collector.map(|c| {
+    collector.inspect(|c| {
         if let Err(e) = c.update() {
             error!(e; "Failed to update jemalloc metrics");
         };
-        c
     })
 });
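`Option::inspect` (stabilized around the same time as the `Result` inspectors) is the same side-effect-only pattern for `Option`: the closure gets `&T` and the option is returned unchanged, so the trailing `c` disappears. Sketch:

    fn main() {
        let v = Some(3).inspect(|n| println!("got {n}"));
        assert_eq!(v, Some(3)); // value passes through untouched
    }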
@@ -196,7 +196,7 @@ fn select_variable(query: &str, query_context: QueryContextRef) -> Option<Output
         // @@aa
         // field is '@@aa'
         fields.push(ColumnSchema::new(
-            &format!("@@{}", var_as[0]),
+            format!("@@{}", var_as[0]),
             ConcreteDataType::string_datatype(),
             true,
         ));
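Here `ColumnSchema::new` presumably accepts an `impl Into<String>`-style argument, so passing `&format!(...)` borrows a freshly built `String` only to convert it again; clippy's `needless_borrows_for_generic_args` flags this, and moving the `String` in reuses its allocation. Generic sketch (function name hypothetical):

    fn take_name(name: impl Into<String>) -> String {
        name.into()
    }

    fn main() {
        let id = 7;
        // `&format!(...)` would borrow the String and force a re-allocation
        // inside `into()`; passing it by value moves the existing one.
        let name = take_name(format!("@@{id}"));
        assert_eq!(name, "@@7");
    }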
@@ -32,7 +32,7 @@ const APPROXIMATE_COLUMN_COUNT: usize = 8;
 /// better sql usability
 /// - replace `.` and `-` with `_`
 fn normalize_otlp_name(name: &str) -> String {
-    name.to_lowercase().replace(|c| c == '.' || c == '-', "_")
+    name.to_lowercase().replace(['.', '-'], "_")
 }

 /// Convert OpenTelemetry metrics to GreptimeDB insert requests
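`str::replace` takes any `Pattern`, and a char array matches any of its characters, so the closure can shrink to `['.', '-']` (a newer clippy lint, `manual_pattern_char_comparison` if I recall correctly, suggests exactly this). Quick check:

    fn main() {
        let normalized = "My.Metric-Name".to_lowercase().replace(['.', '-'], "_");
        assert_eq!(normalized, "my_metric_name");
    }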
@@ -174,7 +174,7 @@ fn encode_gauge(
     scope_attrs: Option<&Vec<KeyValue>>,
 ) -> Result<()> {
     let table = table_writer.get_or_default_table_data(
-        &normalize_otlp_name(name),
+        normalize_otlp_name(name),
         APPROXIMATE_COLUMN_COUNT,
         gauge.data_points.len(),
     );
@@ -208,7 +208,7 @@ fn encode_sum(
     scope_attrs: Option<&Vec<KeyValue>>,
 ) -> Result<()> {
     let table = table_writer.get_or_default_table_data(
-        &normalize_otlp_name(name),
+        normalize_otlp_name(name),
         APPROXIMATE_COLUMN_COUNT,
         sum.data_points.len(),
     );
@@ -358,7 +358,7 @@ fn encode_summary(
     scope_attrs: Option<&Vec<KeyValue>>,
 ) -> Result<()> {
     let table = table_writer.get_or_default_table_data(
-        &normalize_otlp_name(name),
+        normalize_otlp_name(name),
         APPROXIMATE_COLUMN_COUNT,
         summary.data_points.len(),
     );
@@ -377,7 +377,7 @@ fn encode_summary(
     for quantile in &data_point.quantile_values {
         row_writer::write_f64(
             table,
-            &format!("greptime_p{:02}", quantile.quantile * 100f64),
+            format!("greptime_p{:02}", quantile.quantile * 100f64),
             quantile.value,
             &mut row,
         )?;
@@ -15,7 +15,6 @@
 #![feature(box_patterns)]
 #![feature(assert_matches)]
 #![feature(let_chains)]
-#![feature(lazy_cell)]

 pub mod ast;
 pub mod dialect;
@@ -57,6 +57,7 @@ lazy_static! {
 /// - `ms` for `milliseconds`
 /// - `us` for `microseconds`
 /// - `ns` for `nanoseconds`
+///
 /// Required for scenarios that use the shortened version of `INTERVAL`,
 /// f.e `SELECT INTERVAL '1h'` or `SELECT INTERVAL '3w2d'`
 pub(crate) struct ExpandIntervalTransformRule;
@@ -149,6 +150,7 @@ fn update_existing_interval_with_value(interval: &Interval, value: Box<Expr>) ->
 /// If the `interval_str` contains whitespaces, the interval name is considered to be in a full form.
 /// 2. ISO 8601 format strings (e.g., "P1Y2M3D"), case/sign independent
 /// Returns a number of milliseconds corresponding to ISO 8601 (e.g., "36525000 milliseconds")
+///
 /// Note: Hybrid format "1y 2 days 3h" is not supported.
 fn normalize_interval_name(interval_str: &str) -> Option<String> {
     if interval_str.contains(char::is_whitespace) {