Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-22 22:20:02 +00:00
build: update toolchain to nightly-2024-04-18 (#3740)
* chore: update toolchain to nightly-2024-04-17
* fix test clippy
* fix ut
* update fuzz test
* update to nightly-2024-04-18
* add document
* update CI
* avoid unnecessary allow clippy attrs
* help the compiler find the clone is unnecessary and make clippy happy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Signed-off-by: tison <wander4096@gmail.com>
Co-authored-by: tison <wander4096@gmail.com>
.github/workflows/apidoc.yml (vendored, 2 lines changed)
@@ -13,7 +13,7 @@ on:
 name: Build API docs

 env:
-  RUST_TOOLCHAIN: nightly-2023-12-19
+  RUST_TOOLCHAIN: nightly-2024-04-18

 jobs:
   apidoc:
.github/workflows/develop.yml (vendored, 2 lines changed)
@@ -30,7 +30,7 @@ concurrency:
   cancel-in-progress: true

 env:
-  RUST_TOOLCHAIN: nightly-2023-12-19
+  RUST_TOOLCHAIN: nightly-2024-04-18

 jobs:
   check-typos-and-docs:
.github/workflows/nightly-ci.yml (vendored, 2 lines changed)
@@ -12,7 +12,7 @@ concurrency:
   cancel-in-progress: true

 env:
-  RUST_TOOLCHAIN: nightly-2023-12-19
+  RUST_TOOLCHAIN: nightly-2024-04-18

 jobs:
   sqlness:
.github/workflows/release.yml (vendored, 2 lines changed)
@@ -82,7 +82,7 @@ on:
 # Use env variables to control all the release process.
 env:
   # The arguments of building greptime.
-  RUST_TOOLCHAIN: nightly-2023-12-19
+  RUST_TOOLCHAIN: nightly-2024-04-18
   CARGO_PROFILE: nightly

 # Controls whether to run tests, include unit-test, integration-test and sqlness.
Cargo.lock (generated, 1768 lines changed): file diff suppressed because it is too large.
Cargo.toml
@@ -70,7 +70,10 @@ license = "Apache-2.0"
 clippy.print_stdout = "warn"
 clippy.print_stderr = "warn"
 clippy.implicit_clone = "warn"
+clippy.readonly_write_lock = "allow"
 rust.unknown_lints = "deny"
+# Remove this after https://github.com/PyO3/pyo3/issues/4094
+rust.non_local_definitions = "allow"

 [workspace.dependencies]
 # We turn off default-features for some dependencies here so the workspaces which inherit them can
rust-toolchain.toml
@@ -1,2 +1,2 @@
 [toolchain]
-channel = "nightly-2023-12-19"
+channel = "nightly-2024-04-18"
@@ -45,9 +45,9 @@ impl Default for MockUserProvider {

 impl MockUserProvider {
     pub fn set_authorization_info(&mut self, info: DatabaseAuthInfo) {
-        self.catalog = info.catalog.to_owned();
-        self.schema = info.schema.to_owned();
-        self.username = info.username.to_owned();
+        info.catalog.clone_into(&mut self.catalog);
+        info.schema.clone_into(&mut self.schema);
+        info.username.clone_into(&mut self.username);
     }
 }
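The `clone_into` form is what this nightly's clippy (likely the new `assigning_clones` lint) suggests: `ToOwned::clone_into` may reuse the destination's existing allocation instead of dropping it and allocating a fresh one, as `dst = src.to_owned()` does. A minimal standalone sketch, with illustrative variable names:

    fn main() {
        let mut dst = String::with_capacity(64); // pre-existing buffer
        let src: &str = "greptime";
        // `dst = src.to_owned()` would discard dst's buffer and allocate anew;
        // `clone_into` overwrites dst in place, reusing its capacity.
        src.clone_into(&mut dst);
        assert_eq!(dst, "greptime");
    }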
@@ -109,11 +109,7 @@ impl Predicate {
             };
         }
         Predicate::Not(p) => {
-            let Some(b) = p.eval(row) else {
-                return None;
-            };
-
-            return Some(!b);
+            return Some(!p.eval(row)?);
         }
     }
@@ -125,13 +121,7 @@ impl Predicate {
     fn from_expr(expr: DfExpr) -> Option<Predicate> {
         match expr {
             // NOT expr
-            DfExpr::Not(expr) => {
-                let Some(p) = Self::from_expr(*expr) else {
-                    return None;
-                };
-
-                Some(Predicate::Not(Box::new(p)))
-            }
+            DfExpr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
             // expr LIKE pattern
             DfExpr::Like(Like {
                 negated,
@@ -178,25 +168,15 @@ impl Predicate {
             }
             // left AND right
             (left, Operator::And, right) => {
-                let Some(left) = Self::from_expr(left) else {
-                    return None;
-                };
-
-                let Some(right) = Self::from_expr(right) else {
-                    return None;
-                };
+                let left = Self::from_expr(left)?;
+                let right = Self::from_expr(right)?;

                 Some(Predicate::And(Box::new(left), Box::new(right)))
             }
             // left OR right
             (left, Operator::Or, right) => {
-                let Some(left) = Self::from_expr(left) else {
-                    return None;
-                };
-
-                let Some(right) = Self::from_expr(right) else {
-                    return None;
-                };
+                let left = Self::from_expr(left)?;
+                let right = Self::from_expr(right)?;

                 Some(Predicate::Or(Box::new(left), Box::new(right)))
             }
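These rewrites all rely on the fact that `?` works on `Option` inside a function that itself returns `Option`, collapsing the `let-else` + `return None` pattern (clippy's `question_mark` lint points this out). A tiny sketch:

    // `?` unwraps Some(v) or returns None early, exactly like the removed let-else.
    fn half(x: Option<i32>) -> Option<i32> {
        let v = x?;
        Some(v / 2)
    }

    fn main() {
        assert_eq!(half(Some(8)), Some(4));
        assert_eq!(half(None), None);
    }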
@@ -17,7 +17,6 @@ use std::fmt::Debug;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};
 use std::time::Duration;
-use std::usize;

 use common_error::ext::BoxedError;
 use common_meta::cache_invalidator::KvCacheInvalidator;
@@ -84,10 +84,10 @@ impl Command {
         let mut logging_opts = LoggingOptions::default();

         if let Some(dir) = &cli_options.log_dir {
-            logging_opts.dir = dir.clone();
+            logging_opts.dir.clone_from(dir);
         }

-        logging_opts.level = cli_options.log_level.clone();
+        logging_opts.level.clone_from(&cli_options.log_level);

         Ok(Options::Cli(Box::new(logging_opts)))
     }
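The long run of `x = y.clone()` → `x.clone_from(&y)` changes in this and the following hunks is the same assigning-clones fix in mechanical form: `Clone::clone_from` is allowed to reuse `x`'s existing allocation where plain assignment drops it and clones into a fresh one. A standalone sketch with illustrative names:

    fn main() {
        let mut addr = String::from("127.0.0.1:4000"); // has a live buffer
        let new_addr = String::from("0.0.0.0:4000");
        // Equivalent to `addr = new_addr.clone()`, but may reuse addr's capacity.
        addr.clone_from(&new_addr);
        assert_eq!(addr, new_addr);
    }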
@@ -139,19 +139,19 @@ impl StartCommand {
         )?;

         if let Some(dir) = &cli_options.log_dir {
-            opts.logging.dir = dir.clone();
+            opts.logging.dir.clone_from(dir);
         }

         if cli_options.log_level.is_some() {
-            opts.logging.level = cli_options.log_level.clone();
+            opts.logging.level.clone_from(&cli_options.log_level);
         }

         if let Some(addr) = &self.rpc_addr {
-            opts.rpc_addr = addr.clone();
+            opts.rpc_addr.clone_from(addr);
         }

         if self.rpc_hostname.is_some() {
-            opts.rpc_hostname = self.rpc_hostname.clone();
+            opts.rpc_hostname.clone_from(&self.rpc_hostname);
         }

         if let Some(node_id) = self.node_id {
@@ -161,7 +161,8 @@ impl StartCommand {
         if let Some(metasrv_addrs) = &self.metasrv_addr {
             opts.meta_client
                 .get_or_insert_with(MetaClientOptions::default)
-                .metasrv_addrs = metasrv_addrs.clone();
+                .metasrv_addrs
+                .clone_from(metasrv_addrs);
             opts.mode = Mode::Distributed;
         }
@@ -173,7 +174,7 @@ impl StartCommand {
         }

         if let Some(data_home) = &self.data_home {
-            opts.storage.data_home = data_home.clone();
+            opts.storage.data_home.clone_from(data_home);
         }

         // `wal_dir` only affects raft-engine config.
@@ -191,7 +192,7 @@ impl StartCommand {
         }

         if let Some(http_addr) = &self.http_addr {
-            opts.http.addr = http_addr.clone();
+            opts.http.addr.clone_from(http_addr);
         }

         if let Some(http_timeout) = self.http_timeout {
@@ -157,11 +157,11 @@ impl StartCommand {
         )?;

         if let Some(dir) = &cli_options.log_dir {
-            opts.logging.dir = dir.clone();
+            opts.logging.dir.clone_from(dir);
         }

         if cli_options.log_level.is_some() {
-            opts.logging.level = cli_options.log_level.clone();
+            opts.logging.level.clone_from(&cli_options.log_level);
         }

         let tls_opts = TlsOption::new(
@@ -171,7 +171,7 @@ impl StartCommand {
         );

         if let Some(addr) = &self.http_addr {
-            opts.http.addr = addr.clone()
+            opts.http.addr.clone_from(addr);
         }

         if let Some(http_timeout) = self.http_timeout {
@@ -183,24 +183,24 @@ impl StartCommand {
         }

         if let Some(addr) = &self.rpc_addr {
-            opts.grpc.addr = addr.clone()
+            opts.grpc.addr.clone_from(addr);
         }

         if let Some(addr) = &self.mysql_addr {
             opts.mysql.enable = true;
-            opts.mysql.addr = addr.clone();
+            opts.mysql.addr.clone_from(addr);
             opts.mysql.tls = tls_opts.clone();
         }

         if let Some(addr) = &self.postgres_addr {
             opts.postgres.enable = true;
-            opts.postgres.addr = addr.clone();
+            opts.postgres.addr.clone_from(addr);
             opts.postgres.tls = tls_opts;
         }

         if let Some(addr) = &self.opentsdb_addr {
             opts.opentsdb.enable = true;
-            opts.opentsdb.addr = addr.clone();
+            opts.opentsdb.addr.clone_from(addr);
         }

         if let Some(enable) = self.influxdb_enable {
@@ -210,11 +210,12 @@ impl StartCommand {
         if let Some(metasrv_addrs) = &self.metasrv_addr {
             opts.meta_client
                 .get_or_insert_with(MetaClientOptions::default)
-                .metasrv_addrs = metasrv_addrs.clone();
+                .metasrv_addrs
+                .clone_from(metasrv_addrs);
             opts.mode = Mode::Distributed;
         }

-        opts.user_provider = self.user_provider.clone();
+        opts.user_provider.clone_from(&self.user_provider);

         Ok(Options::Frontend(Box::new(opts)))
     }
@@ -134,23 +134,23 @@ impl StartCommand {
         )?;

         if let Some(dir) = &cli_options.log_dir {
-            opts.logging.dir = dir.clone();
+            opts.logging.dir.clone_from(dir);
         }

         if cli_options.log_level.is_some() {
-            opts.logging.level = cli_options.log_level.clone();
+            opts.logging.level.clone_from(&cli_options.log_level);
         }

         if let Some(addr) = &self.bind_addr {
-            opts.bind_addr = addr.clone();
+            opts.bind_addr.clone_from(addr);
         }

         if let Some(addr) = &self.server_addr {
-            opts.server_addr = addr.clone();
+            opts.server_addr.clone_from(addr);
         }

         if let Some(addr) = &self.store_addr {
-            opts.store_addr = addr.clone();
+            opts.store_addr.clone_from(addr);
         }

         if let Some(selector_type) = &self.selector {
@@ -168,7 +168,7 @@ impl StartCommand {
         }

         if let Some(http_addr) = &self.http_addr {
-            opts.http.addr = http_addr.clone();
+            opts.http.addr.clone_from(http_addr);
         }

         if let Some(http_timeout) = self.http_timeout {
@@ -176,11 +176,11 @@ impl StartCommand {
         }

         if let Some(data_home) = &self.data_home {
-            opts.data_home = data_home.clone();
+            opts.data_home.clone_from(data_home);
         }

         if !self.store_key_prefix.is_empty() {
-            opts.store_key_prefix = self.store_key_prefix.clone()
+            opts.store_key_prefix.clone_from(&self.store_key_prefix)
         }

         if let Some(max_txn_ops) = self.max_txn_ops {
@@ -293,11 +293,11 @@ impl StartCommand {
         opts.mode = Mode::Standalone;

         if let Some(dir) = &cli_options.log_dir {
-            opts.logging.dir = dir.clone();
+            opts.logging.dir.clone_from(dir);
         }

         if cli_options.log_level.is_some() {
-            opts.logging.level = cli_options.log_level.clone();
+            opts.logging.level.clone_from(&cli_options.log_level);
         }

         let tls_opts = TlsOption::new(
@@ -307,11 +307,11 @@ impl StartCommand {
         );

         if let Some(addr) = &self.http_addr {
-            opts.http.addr = addr.clone()
+            opts.http.addr.clone_from(addr);
         }

         if let Some(data_home) = &self.data_home {
-            opts.storage.data_home = data_home.clone();
+            opts.storage.data_home.clone_from(data_home);
         }

         if let Some(addr) = &self.rpc_addr {
@@ -325,31 +325,31 @@ impl StartCommand {
                 }
                 .fail();
             }
-            opts.grpc.addr = addr.clone()
+            opts.grpc.addr.clone_from(addr)
         }

         if let Some(addr) = &self.mysql_addr {
             opts.mysql.enable = true;
-            opts.mysql.addr = addr.clone();
+            opts.mysql.addr.clone_from(addr);
             opts.mysql.tls = tls_opts.clone();
         }

         if let Some(addr) = &self.postgres_addr {
             opts.postgres.enable = true;
-            opts.postgres.addr = addr.clone();
+            opts.postgres.addr.clone_from(addr);
             opts.postgres.tls = tls_opts;
         }

         if let Some(addr) = &self.opentsdb_addr {
             opts.opentsdb.enable = true;
-            opts.opentsdb.addr = addr.clone();
+            opts.opentsdb.addr.clone_from(addr);
         }

         if self.influxdb_enable {
             opts.influxdb.enable = self.influxdb_enable;
         }

-        opts.user_provider = self.user_provider.clone();
+        opts.user_provider.clone_from(&self.user_provider);

         let metadata_store = opts.metadata_store.clone();
         let procedure = opts.procedure.clone();
@@ -240,10 +240,14 @@ impl DatanodeTableManager {
         // FIXME(weny): add unit tests.
         let mut new_region_info = region_info.clone();
         if need_update_options {
-            new_region_info.region_options = new_region_options.clone();
+            new_region_info
+                .region_options
+                .clone_from(new_region_options);
         }
         if need_update_wal_options {
-            new_region_info.region_wal_options = new_region_wal_options.clone();
+            new_region_info
+                .region_wal_options
+                .clone_from(new_region_wal_options);
         }
         let val = DatanodeTableValue::new(table_id, regions, new_region_info)
             .try_as_raw_value()?;
@@ -172,9 +172,7 @@ impl Inner {

         if !res.success {
             if let Some(kv) = res.prev_kv {
-                expect = kv.value.clone();
-
-                let v: [u8; 8] = match kv.value.try_into() {
+                let v: [u8; 8] = match kv.value.clone().try_into() {
                     Ok(a) => a,
                     Err(v) => {
                         return error::UnexpectedSequenceValueSnafu {
@@ -184,13 +182,12 @@ impl Inner {
                     }
                 };
                 let v = u64::from_le_bytes(v);
-
                 // If the existed value is smaller than the initial, we should start from the initial.
                 start = v.max(self.initial);
+                expect = kv.value;
             } else {
-                expect = vec![];
-
                 start = self.initial;
+                expect = vec![];
             }
             continue;
         }
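The reorder lets the clone feed only the fallible conversion: `TryFrom<Vec<u8>> for [u8; 8]` hands the vector back in `Err`, and the untouched `kv.value` can still move into `expect` afterwards. A standalone sketch of that conversion:

    fn main() {
        let bytes: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8];
        // On failure, try_into returns the Vec back in Err so it can be reported;
        // on success we get a fixed-size array for from_le_bytes.
        let arr: [u8; 8] = bytes.clone().try_into().expect("need exactly 8 bytes");
        assert_eq!(u64::from_le_bytes(arr), 0x0807_0605_0403_0201);
        let expect = bytes; // the original buffer is still usable here
        assert_eq!(expect.len(), 8);
    }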
@@ -22,6 +22,7 @@ use common_procedure::store::util::multiple_value_stream;
 use common_procedure::Result as ProcedureResult;
 use futures::future::try_join_all;
 use futures::StreamExt;
+use itertools::Itertools;
 use snafu::ResultExt;

 use crate::error::Result;
@@ -79,17 +80,21 @@ fn decode_kv(kv: KeyValue) -> Result<(String, Vec<u8>)> {
     Ok((key, value))
 }

-enum SplitValue<'a> {
-    Single(&'a [u8]),
-    Multiple(Vec<&'a [u8]>),
+enum SplitValue {
+    Single(Vec<u8>),
+    Multiple(Vec<Vec<u8>>),
 }

-fn split_value(value: &[u8], max_value_size: Option<usize>) -> SplitValue<'_> {
+fn split_value(value: Vec<u8>, max_value_size: Option<usize>) -> SplitValue {
     if let Some(max_value_size) = max_value_size {
         if value.len() <= max_value_size {
             SplitValue::Single(value)
         } else {
-            SplitValue::Multiple(value.chunks(max_value_size).collect::<Vec<_>>())
+            let mut values = vec![];
+            for chunk in value.into_iter().chunks(max_value_size).into_iter() {
+                values.push(chunk.collect());
+            }
+            SplitValue::Multiple(values)
         }
     } else {
         SplitValue::Single(value)
@@ -99,10 +104,10 @@ fn split_value(value: &[u8], max_value_size: Option<usize>) -> SplitValue<'_> {
 #[async_trait]
 impl StateStore for KvStateStore {
     async fn put(&self, key: &str, value: Vec<u8>) -> ProcedureResult<()> {
-        let split = split_value(&value, self.max_value_size);
+        let split = split_value(value, self.max_value_size);
         let key = with_prefix(key);
         match split {
-            SplitValue::Single(_) => {
+            SplitValue::Single(value) => {
                 self.kv_backend
                     .put(
                         PutRequest::new()
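Taking the `Vec<u8>` by value lets the `Single` arm hand the buffer straight to the put request with no copy, while `Itertools::chunks` over an owning iterator yields owned pieces for the `Multiple` arm. A standalone sketch of that chunking pattern (the data values are illustrative):

    use itertools::Itertools;

    fn main() {
        let bytes: Vec<u8> = (0u8..10).collect();
        let mut parts: Vec<Vec<u8>> = Vec::new();
        // Chunking an owning iterator moves each byte into its part, unlike
        // slice::chunks, which would only hand out borrowed &[u8] windows.
        for chunk in bytes.into_iter().chunks(4).into_iter() {
            parts.push(chunk.collect());
        }
        assert_eq!(parts, vec![vec![0, 1, 2, 3], vec![4, 5, 6, 7], vec![8, 9]]);
    }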
@@ -163,7 +163,7 @@ mod tests {
         let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
         let mut topic_manager = KafkaTopicManager::new(config.clone(), kv_backend);
         // Replaces the default topic pool with the constructed topics.
-        topic_manager.topic_pool = topics.clone();
+        topic_manager.topic_pool.clone_from(&topics);
         // Replaces the default selector with a round-robin selector without shuffled.
         topic_manager.topic_selector = Arc::new(RoundRobinTopicSelector::default());
@@ -291,7 +291,7 @@ mod tests {
         let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
         let mut manager = TopicManager::new(config.clone(), kv_backend);
         // Replaces the default topic pool with the constructed topics.
-        manager.topic_pool = topics.clone();
+        manager.topic_pool.clone_from(&topics);
         // Replaces the default selector with a round-robin selector without shuffled.
         manager.topic_selector = Arc::new(RoundRobinTopicSelector::default());
         manager.start().await.unwrap();
@@ -19,19 +19,19 @@ use std::sync::{Arc, Mutex};
 use tokio::sync::{OwnedRwLockReadGuard, OwnedRwLockWriteGuard, RwLock};

 pub enum OwnedKeyRwLockGuard {
-    Read(OwnedRwLockReadGuard<()>),
-    Write(OwnedRwLockWriteGuard<()>),
+    Read { _guard: OwnedRwLockReadGuard<()> },
+    Write { _guard: OwnedRwLockWriteGuard<()> },
 }

 impl From<OwnedRwLockReadGuard<()>> for OwnedKeyRwLockGuard {
     fn from(guard: OwnedRwLockReadGuard<()>) -> Self {
-        OwnedKeyRwLockGuard::Read(guard)
+        OwnedKeyRwLockGuard::Read { _guard: guard }
     }
 }

 impl From<OwnedRwLockWriteGuard<()>> for OwnedKeyRwLockGuard {
     fn from(guard: OwnedRwLockWriteGuard<()>) -> Self {
-        OwnedKeyRwLockGuard::Write(guard)
+        OwnedKeyRwLockGuard::Write { _guard: guard }
     }
 }
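Switching the variants to struct form with an underscore-named field is likely a response to the newer rustc dead_code analysis, which began flagging enum fields that are constructed but never read; here the guards exist purely for their Drop side effect. A minimal sketch of the idea, with illustrative types:

    struct ReleaseOnDrop;

    impl Drop for ReleaseOnDrop {
        fn drop(&mut self) {
            println!("lock released");
        }
    }

    enum Guard {
        // Held only for its Drop side effect; the underscore name tells the
        // dead_code lint that never reading the field is intentional.
        Held { _guard: ReleaseOnDrop },
    }

    fn main() {
        let g = Guard::Held { _guard: ReleaseOnDrop };
        drop(g); // prints "lock released"
    }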
@@ -44,9 +44,7 @@ pub fn build_filter_from_timestamp(
     ts_col_name: &str,
     time_range: Option<&TimestampRange>,
 ) -> Option<Expr> {
-    let Some(time_range) = time_range else {
-        return None;
-    };
+    let time_range = time_range?;
     let ts_col_expr = DfExpr::Column(Column {
         relation: None,
         name: ts_col_name.to_string(),
@@ -381,7 +381,7 @@ impl Timestamp {

     pub fn from_chrono_datetime(ndt: NaiveDateTime) -> Option<Self> {
         let sec = ndt.and_utc().timestamp();
-        let nsec = ndt.timestamp_subsec_nanos();
+        let nsec = ndt.and_utc().timestamp_subsec_nanos();
         Timestamp::from_splits(sec, nsec)
     }
@@ -1065,7 +1065,7 @@ mod tests {
         let (sec, nsec) = Timestamp::new(i64::MIN, TimeUnit::Nanosecond).split();
         let time = DateTime::from_timestamp(sec, nsec).unwrap().naive_utc();
         assert_eq!(sec, time.and_utc().timestamp());
-        assert_eq!(nsec, time.timestamp_subsec_nanos());
+        assert_eq!(nsec, time.and_utc().timestamp_subsec_nanos());
     }

     #[test]
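These two hunks track a chrono deprecation (0.4.35 deprecated the timestamp accessors on `NaiveDateTime`); the replacement is to attach a UTC offset first via `and_utc()`. A hedged sketch, assuming chrono 0.4.35+ and an arbitrary example date:

    use chrono::{NaiveDate, NaiveDateTime};

    fn main() {
        let ndt: NaiveDateTime = NaiveDate::from_ymd_opt(2024, 4, 18)
            .unwrap()
            .and_hms_opt(0, 0, 0)
            .unwrap();
        // Deprecated: ndt.timestamp(), ndt.timestamp_subsec_nanos().
        // Current: view the naive value as UTC, then read the epoch fields.
        let utc = ndt.and_utc();
        assert_eq!(utc.timestamp(), 1_713_398_400);
        assert_eq!(utc.timestamp_subsec_nanos(), 0);
    }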
@@ -120,7 +120,7 @@ where

         // update min/max, assume values are appended in lexicographic order
         if stats.distinct_count == 1 {
-            stats.min_value = value.clone();
+            stats.min_value.clone_from(&value);
         }
         stats.max_value = value;
     }
@@ -510,9 +510,6 @@ impl MetaClient {
 #[cfg(test)]
 mod tests {
     use api::v1::meta::{HeartbeatRequest, Peer};
-    use meta_srv::metasrv::SelectorContext;
-    use meta_srv::selector::{Namespace, Selector, SelectorOptions};
-    use meta_srv::Result as MetaResult;

     use super::*;
     use crate::{error, mocks};
@@ -662,36 +659,6 @@ mod tests {
         });
     }

-    struct MockSelector;
-
-    #[async_trait::async_trait]
-    impl Selector for MockSelector {
-        type Context = SelectorContext;
-        type Output = Vec<Peer>;
-
-        async fn select(
-            &self,
-            _ns: Namespace,
-            _ctx: &Self::Context,
-            _opts: SelectorOptions,
-        ) -> MetaResult<Self::Output> {
-            Ok(vec![
-                Peer {
-                    id: 0,
-                    addr: "peer0".to_string(),
-                },
-                Peer {
-                    id: 1,
-                    addr: "peer1".to_string(),
-                },
-                Peer {
-                    id: 2,
-                    addr: "peer2".to_string(),
-                },
-            ])
-        }
-    }
-
     #[tokio::test]
     async fn test_range_get() {
         let tc = new_client("test_range_get").await;
@@ -65,7 +65,7 @@ use crate::procedure::region_migration::DefaultContextFactory;
 use crate::pubsub::PublishRef;
 use crate::selector::lease_based::LeaseBasedSelector;
 use crate::service::mailbox::MailboxRef;
-use crate::service::store::cached_kv::{CheckLeader, LeaderCachedKvBackend};
+use crate::service::store::cached_kv::LeaderCachedKvBackend;
 use crate::state::State;
 use crate::table_meta_alloc::MetasrvPeerAllocator;
@@ -436,13 +436,3 @@ impl Default for MetasrvBuilder {
         Self::new()
     }
 }
-
-struct CheckLeaderByElection(Option<ElectionRef>);
-
-impl CheckLeader for CheckLeaderByElection {
-    fn check(&self) -> bool {
-        self.0
-            .as_ref()
-            .map_or(false, |election| election.is_leader())
-    }
-}
@@ -205,11 +205,9 @@ mod tests {
         let alive_stat_kvs = filter_out_expired_datanode(stat_kvs, &lease_kvs);

         assert_eq!(1, alive_stat_kvs.len());
-        assert!(alive_stat_kvs
-            .get(&StatKey {
-                cluster_id: 1,
-                node_id: 1
-            })
-            .is_some());
+        assert!(alive_stat_kvs.contains_key(&StatKey {
+            cluster_id: 1,
+            node_id: 1
+        }));
     }
 }
@@ -263,7 +263,7 @@ impl KvBackend for LeaderCachedKvBackend {
             .collect::<HashSet<_>>();

         metrics::METRIC_META_KV_CACHE_HIT
-            .with_label_values(&[&"batch_get"])
+            .with_label_values(&["batch_get"])
             .inc_by(hit_keys.len() as u64);

         let missed_keys = req
@@ -273,7 +273,7 @@ impl KvBackend for LeaderCachedKvBackend {
             .cloned()
             .collect::<Vec<_>>();
         metrics::METRIC_META_KV_CACHE_MISS
-            .with_label_values(&[&"batch_get"])
+            .with_label_values(&["batch_get"])
             .inc_by(missed_keys.len() as u64);

         let remote_req = BatchGetRequest { keys: missed_keys };
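The old `&[&"batch_get"]` only compiled because `&&str` deref-coerces to `&str`; clippy (needless_borrow) flags the redundant extra borrow. A sketch of why both forms type-check, using a hypothetical `take` function in place of the prometheus API:

    fn take(labels: &[&str]) -> usize {
        labels.len()
    }

    fn main() {
        // `&"batch_get"` would be a `&&str` that coerces to `&str`, but the
        // plain literal is what the signature actually asks for.
        assert_eq!(take(&["batch_get"]), 1);
    }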
@@ -557,12 +557,12 @@ mod test {

         let mut options = HashMap::new();
         options.insert(PHYSICAL_TABLE_METADATA_KEY.to_string(), "value".to_string());
-        request.options = options.clone();
+        request.options.clone_from(&options);
         let result = MetricEngineInner::verify_region_create_request(&request);
         assert!(result.is_ok());

         options.insert(LOGICAL_TABLE_METADATA_KEY.to_string(), "value".to_string());
-        request.options = options.clone();
+        request.options.clone_from(&options);
         let result = MetricEngineInner::verify_region_create_request(&request);
         assert!(result.is_err());
@@ -620,7 +620,7 @@ mod test {
             data_region_request.primary_key,
             vec![ReservedColumnId::table_id(), ReservedColumnId::tsid(), 1]
         );
-        assert!(data_region_request.options.get("ttl").is_some());
+        assert!(data_region_request.options.contains_key("ttl"));

         // check create metadata region request
         let metadata_region_request = engine_inner.create_request_for_metadata_region(&request);
@@ -628,6 +628,6 @@ mod test {
             metadata_region_request.region_dir,
             "/test_dir/metadata/".to_string()
         );
-        assert!(metadata_region_request.options.get("ttl").is_none());
+        assert!(!metadata_region_request.options.contains_key("ttl"));
     }
 }
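`contains_key` states the intent directly and is what clippy suggests when a `get(..)` result is only checked, never used. A minimal sketch:

    use std::collections::HashMap;

    fn main() {
        let mut options = HashMap::new();
        options.insert("ttl", "7d");
        // Preferred over options.get("ttl").is_some() / .is_none().
        assert!(options.contains_key("ttl"));
        assert!(!options.contains_key("compaction"));
    }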
src/mito2/src/cache/cache_size.rs (2 lines changed)
@@ -137,6 +137,6 @@ mod tests {
     fn test_parquet_meta_size() {
         let metadata = parquet_meta();

-        assert_eq!(956, parquet_meta_size(&metadata));
+        assert_eq!(964, parquet_meta_size(&metadata));
     }
 }
@@ -497,9 +497,7 @@ impl FlushScheduler {
         &mut self,
         region_id: RegionId,
     ) -> Option<(Vec<SenderDdlRequest>, Vec<SenderWriteRequest>)> {
-        let Some(flush_status) = self.region_status.get_mut(&region_id) else {
-            return None;
-        };
+        let flush_status = self.region_status.get_mut(&region_id)?;

         // This region doesn't have running flush job.
         flush_status.flushing = false;
@@ -440,7 +440,7 @@ mod tests {
     // No need to add compatible tests for RegionOptions since the above tests already check for compatibility.
     #[test]
     fn test_with_any_wal_options() {
-        let all_wal_options = vec![
+        let all_wal_options = [
             WalOptions::RaftEngine,
             WalOptions::Kafka(KafkaWalOptions {
                 topic: "test_topic".to_string(),
@@ -82,7 +82,7 @@ mod tests {

     #[test]
     fn test_non_contiguous_ranges() {
-        let ranges = vec![1..3, 5..8];
+        let ranges = [1..3, 5..8];
         let (selection, skipped) = row_selection_from_row_ranges(ranges.iter().cloned(), 10);
         let expected = RowSelection::from(vec![
             RowSelector::skip(1),
@@ -96,7 +96,7 @@ mod tests {

     #[test]
     fn test_empty_range() {
-        let ranges = vec![];
+        let ranges = [];
         let (selection, skipped) = row_selection_from_row_ranges(ranges.iter().cloned(), 10);
         let expected = RowSelection::from(vec![]);
         assert_eq!(selection, expected);
@@ -105,7 +105,7 @@ mod tests {

     #[test]
     fn test_adjacent_ranges() {
-        let ranges = vec![1..2, 2..3];
+        let ranges = [1..2, 2..3];
         let (selection, skipped) = row_selection_from_row_ranges(ranges.iter().cloned(), 10);
         let expected = RowSelection::from(vec![RowSelector::skip(1), RowSelector::select(2)]);
         assert_eq!(selection, expected);
@@ -114,7 +114,7 @@ mod tests {

     #[test]
     fn test_large_gap_between_ranges() {
-        let ranges = vec![1..2, 100..101];
+        let ranges = [1..2, 100..101];
         let (selection, skipped) = row_selection_from_row_ranges(ranges.iter().cloned(), 10240);
         let expected = RowSelection::from(vec![
             RowSelector::skip(1),
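clippy's useless_vec lint drives these test changes: a fixed collection that is only iterated does not need a heap-allocated Vec, and a stack array supports the same `.iter().cloned()` calls. Sketch:

    fn main() {
        let ranges = [1..3, 5..8]; // array on the stack, no allocation
        let selected: usize = ranges.iter().cloned().map(|r| r.len()).sum();
        assert_eq!(selected, 5);
    }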
@@ -15,7 +15,6 @@
 use std::collections::HashMap;
 use std::future::Future;
 use std::sync::Arc;
-use std::usize;

 use client::{Output, OutputData, OutputMeta};
 use common_base::readable_size::ReadableSize;
@@ -791,7 +791,7 @@ impl PromPlanner {
         }

         // change the tag columns in context
-        self.ctx.tag_columns = labels.labels.clone();
+        self.ctx.tag_columns.clone_from(&labels.labels);

         // add timestamp column
         exprs.push(self.create_time_index_column_expr()?);
@@ -29,9 +29,6 @@ pub trait ExtensionAnalyzerRule {
         ctx: &QueryEngineContext,
         config: &ConfigOptions,
     ) -> Result<LogicalPlan>;
-
-    /// A human readable name for this analyzer rule
-    fn name(&self) -> &str;
 }

 pub mod order_hint;
@@ -121,10 +121,6 @@ impl ExtensionAnalyzerRule for TypeConversionRule {
         })
         .map(|x| x.data)
     }
-
-    fn name(&self) -> &str {
-        "TypeConversionRule"
-    }
 }

 struct TypeConverter {
@@ -58,7 +58,7 @@ struct DataSourceWrapper {

 impl DataSource for DataSourceWrapper {
     fn get_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream, BoxedError> {
-        *self.filter.write().unwrap() = request.filters.clone();
+        self.filter.write().unwrap().clone_from(&request.filters);
         self.inner.get_stream(request)
     }
 }
@@ -38,7 +38,7 @@ use crate::python::rspython::dataframe_impl::data_frame::set_dataframe_in_scope;
 use crate::python::rspython::dataframe_impl::init_data_frame;
 use crate::python::rspython::utils::{format_py_error, is_instance, py_obj_to_vec};

-thread_local!(static INTERPRETER: RefCell<Option<Rc<Interpreter>>> = RefCell::new(None));
+thread_local!(static INTERPRETER: RefCell<Option<Rc<Interpreter>>> = const { RefCell::new(None) });

 /// Using `RustPython` to run a parsed `Coprocessor` struct as input to execute python code
 pub(crate) fn rspy_exec_parsed(
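The `const { .. }` initializer is what clippy's thread_local_initializer_can_be_made_const lint (shipped around this clippy release) suggests: when the initial value is computable at compile time, the runtime can skip the lazy-initialization check on each access. A sketch:

    use std::cell::RefCell;

    // A const initializer avoids per-access lazy-init bookkeeping.
    thread_local!(static COUNTER: RefCell<u32> = const { RefCell::new(0) });

    fn main() {
        COUNTER.with(|c| *c.borrow_mut() += 1);
        assert_eq!(COUNTER.with(|c| *c.borrow()), 1);
    }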
@@ -12,6 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use std::fmt::{Display, Formatter};
+
 use sqlparser::ast::Statement as SpStatement;
 use sqlparser_derive::{Visit, VisitMut};

@@ -31,8 +33,8 @@ impl TryFrom<SpStatement> for Explain {
     }
 }

-impl ToString for Explain {
-    fn to_string(&self) -> String {
-        self.inner.to_string()
+impl Display for Explain {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.inner)
     }
 }
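Implementing Display instead of ToString follows clippy's to_string_trait_impl lint: the standard library's blanket `impl<T: Display> ToString for T` supplies `to_string` automatically, and a manual ToString impl forfeits `format!`/`write!` integration. A sketch with a stand-in type:

    use std::fmt::{Display, Formatter};

    struct Explainish(&'static str);

    impl Display for Explainish {
        fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
            write!(f, "EXPLAIN {}", self.0)
        }
    }

    fn main() {
        // to_string() comes for free from the blanket impl over Display.
        assert_eq!(Explainish("SELECT 1").to_string(), "EXPLAIN SELECT 1");
    }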
@@ -573,6 +573,7 @@ mod tests {
         let file = std::fs::OpenOptions::new()
             .write(true)
             .create(true)
+            .truncate(true)
             .open(path.clone())
             .unwrap();
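The added `truncate(true)` answers clippy's suspicious_open_options lint: write + create without an explicit truncate/append choice silently keeps the tail of any longer pre-existing file. A sketch (the path is illustrative):

    use std::io::Write;

    fn main() -> std::io::Result<()> {
        let mut file = std::fs::OpenOptions::new()
            .write(true)
            .create(true)
            .truncate(true) // without this, old bytes past the new end survive
            .open("/tmp/suspicious_open_options_demo.txt")?;
        file.write_all(b"fresh contents")?;
        Ok(())
    }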
@@ -1,22 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::fmt;
-
-#[async_trait::async_trait]
-pub(crate) trait DslExecutor<T, U> {
-    type Error: Sync + Send + fmt::Debug;
-
-    async fn execute(&self, input: &T) -> Result<U, Self::Error>;
-}
@@ -50,7 +50,7 @@ impl<R: Rng + 'static> Generator<InsertIntoExpr, R> for InsertExprGenerator<R> {
         let mut values_columns = vec![];
         if omit_column_list {
             // If omit column list, then all columns are required in the values list
-            values_columns = self.table_ctx.columns.clone();
+            values_columns.clone_from(&self.table_ctx.columns);
         } else {
             for column in &self.table_ctx.columns {
                 let can_omit = column.is_nullable() || column.has_default_value();
@@ -12,11 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#![feature(associated_type_bounds)]
-
 pub mod context;
 pub mod error;
 pub mod executor;
 pub mod fake;
 pub mod generator;
 pub mod ir;
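The gate can go because associated type bounds appear to be stabilized on nightlies by this date, so keeping the attribute would trip the stable_features warning. A sketch of the syntax the crate keeps using:

    use std::fmt::Display;

    // `Item: Display` directly on the associated type no longer needs
    // #![feature(associated_type_bounds)] on this toolchain.
    fn print_all(iter: impl Iterator<Item: Display>) {
        for item in iter {
            println!("{item}");
        }
    }

    fn main() {
        print_all([1, 2, 3].into_iter());
    }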
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::fmt::Display;
 use std::net::SocketAddr;
 use std::path::PathBuf;
 use std::process::Command;
@@ -24,42 +23,6 @@ use tokio::time;

 /// Check port every 0.1 second.
 const PORT_CHECK_INTERVAL: Duration = Duration::from_millis(100);
-const NULL_DATA_PLACEHOLDER: &str = "NULL";
-
-/// Helper struct for iterate over column with null_mask
-struct NullableColumnIter<N, B, D, T>
-where
-    N: Iterator<Item = B>,
-    B: AsRef<bool>,
-    D: Iterator<Item = T>,
-    T: Display,
-{
-    null_iter: N,
-    data_iter: D,
-}
-
-impl<N, B, D, T> Iterator for NullableColumnIter<N, B, D, T>
-where
-    N: Iterator<Item = B>,
-    B: AsRef<bool>,
-    D: Iterator<Item = T>,
-    T: Display,
-{
-    type Item = String;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        // iter the null_mask first
-        if let Some(is_null) = self.null_iter.next() {
-            if *is_null.as_ref() {
-                Some(NULL_DATA_PLACEHOLDER.to_string())
-            } else {
-                self.data_iter.next().map(|data| data.to_string())
-            }
-        } else {
-            None
-        }
-    }
-}

 /// Get the dir of test cases. This function only works when the runner is run
 /// under the project's dir because it depends on some envs set by cargo.