chore: upgrade some dependencies (#5777)

* chore: upgrade some dependencies

* chore: upgrade some dependencies

* fix: cr

* fix: ci

* fix: test

* fix: cargo fmt
This commit is contained in:
fys
2025-03-27 10:48:44 +08:00
committed by GitHub
parent e107bd5529
commit 2b2ea5bf72
55 changed files with 665 additions and 555 deletions

View File

@@ -177,7 +177,7 @@ fn create_table_info(table_id: TableId, table_name: TableName) -> RawTableInfo {
fn create_region_routes(regions: Vec<RegionNumber>) -> Vec<RegionRoute> {
let mut region_routes = Vec::with_capacity(100);
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
for region_id in regions.into_iter().map(u64::from) {
region_routes.push(RegionRoute {
@@ -188,7 +188,7 @@ fn create_region_routes(regions: Vec<RegionNumber>) -> Vec<RegionRoute> {
attrs: BTreeMap::new(),
},
leader_peer: Some(Peer {
id: rng.gen_range(0..10),
id: rng.random_range(0..10),
addr: String::new(),
}),
follower_peers: vec![],

View File

@@ -13,7 +13,7 @@
// limitations under the License.
use enum_dispatch::enum_dispatch;
use rand::seq::SliceRandom;
use rand::seq::IndexedRandom;
#[enum_dispatch]
pub trait LoadBalance {
@@ -37,7 +37,7 @@ pub struct Random;
impl LoadBalance for Random {
fn get_peer<'a>(&self, peers: &'a [String]) -> Option<&'a String> {
peers.choose(&mut rand::thread_rng())
peers.choose(&mut rand::rng())
}
}

View File

@@ -25,7 +25,7 @@ async fn do_bench_channel_manager() {
let m_clone = m.clone();
let join = tokio::spawn(async move {
for _ in 0..10000 {
let idx = rand::random::<usize>() % 100;
let idx = rand::random::<u32>() % 100;
let ret = m_clone.get(format!("{idx}"));
let _ = ret.unwrap();
}

View File

@@ -290,13 +290,13 @@ mod tests {
num_per_range: u32,
max_bytes: u32,
) {
let num_cases = rand::thread_rng().gen_range(1..=8);
let num_cases = rand::rng().random_range(1..=8);
common_telemetry::info!("num_cases: {}", num_cases);
let mut cases = Vec::with_capacity(num_cases);
for i in 0..num_cases {
let size = rand::thread_rng().gen_range(size_limit..=max_bytes);
let size = rand::rng().random_range(size_limit..=max_bytes);
let mut large_value = vec![0u8; size as usize];
rand::thread_rng().fill_bytes(&mut large_value);
rand::rng().fill_bytes(&mut large_value);
// Starts from `a`.
let prefix = format!("{}/", std::char::from_u32(97 + i as u32).unwrap());
@@ -354,8 +354,8 @@ mod tests {
#[tokio::test]
async fn test_meta_state_store_split_value() {
let size_limit = rand::thread_rng().gen_range(128..=512);
let page_size = rand::thread_rng().gen_range(1..10);
let size_limit = rand::rng().random_range(128..=512);
let page_size = rand::rng().random_range(1..10);
let kv_backend = Arc::new(MemoryKvBackend::new());
test_meta_state_store_split_value_with_size_limit(kv_backend, size_limit, page_size, 8192)
.await;
@@ -388,7 +388,7 @@ mod tests {
// However, some KvBackends, e.g. the `ChrootKvBackend`, will add a prefix to `key`;
// we don't know the exact size of the key.
let size_limit = 1536 * 1024 - key_size;
let page_size = rand::thread_rng().gen_range(1..10);
let page_size = rand::rng().random_range(1..10);
test_meta_state_store_split_value_with_size_limit(
kv_backend,
size_limit,

View File

@@ -39,7 +39,7 @@ impl RoundRobinTopicSelector {
// The cursor in the round-robin selector is not persisted which may break the round-robin strategy across crashes.
// Introducing a shuffling strategy may help mitigate this issue.
pub fn with_shuffle() -> Self {
let offset = rand::thread_rng().gen_range(0..64);
let offset = rand::rng().random_range(0..64);
Self {
cursor: AtomicUsize::new(offset),
}

View File

@@ -207,7 +207,7 @@ impl Runner {
if let Some(d) = retry.next() {
let millis = d.as_millis() as u64;
// Add random noise to the retry delay to avoid retry storms.
let noise = rand::thread_rng().gen_range(0..(millis / 4) + 1);
let noise = rand::rng().random_range(0..(millis / 4) + 1);
let d = d.add(Duration::from_millis(noise));
self.wait_on_err(d, retry_times).await;

View File

@@ -22,6 +22,6 @@ static PORTS: OnceCell<AtomicUsize> = OnceCell::new();
/// Returns a unique port (in runtime) for tests
pub fn get_port() -> usize {
PORTS
.get_or_init(|| AtomicUsize::new(rand::thread_rng().gen_range(13000..13800)))
.get_or_init(|| AtomicUsize::new(rand::rng().random_range(13000..13800)))
.fetch_add(1, Ordering::Relaxed)
}

View File

@@ -715,10 +715,10 @@ mod tests {
TimeUnit::Microsecond,
TimeUnit::Nanosecond,
];
let mut rng = rand::thread_rng();
let unit_idx: usize = rng.gen_range(0..4);
let mut rng = rand::rng();
let unit_idx: usize = rng.random_range(0..4);
let unit = units[unit_idx];
let value: i64 = rng.gen();
let value: i64 = rng.random();
Timestamp::new(value, unit)
}
@@ -745,8 +745,8 @@ mod tests {
/// Generate timestamp less than or equal to `threshold`
fn gen_ts_le(threshold: &Timestamp) -> Timestamp {
let mut rng = rand::thread_rng();
let timestamp = rng.gen_range(i64::MIN..=threshold.value);
let mut rng = rand::rng();
let timestamp = rng.random_range(i64::MIN..=threshold.value);
Timestamp::new(timestamp, threshold.unit)
}

View File

@@ -179,7 +179,7 @@ impl Context<'_, '_> {
) -> CollectionBundle<Batch> {
let (send_port, recv_port) = self.df.make_edge::<_, Toff<Batch>>("constant_batch");
let mut per_time: BTreeMap<repr::Timestamp, Vec<DiffRow>> = Default::default();
for (key, group) in &rows.into_iter().group_by(|(_row, ts, _diff)| *ts) {
for (key, group) in &rows.into_iter().chunk_by(|(_row, ts, _diff)| *ts) {
per_time.entry(key).or_default().extend(group);
}
@@ -233,7 +233,7 @@ impl Context<'_, '_> {
pub fn render_constant(&mut self, rows: Vec<DiffRow>) -> CollectionBundle {
let (send_port, recv_port) = self.df.make_edge::<_, Toff>("constant");
let mut per_time: BTreeMap<repr::Timestamp, Vec<DiffRow>> = Default::default();
for (key, group) in &rows.into_iter().group_by(|(_row, ts, _diff)| *ts) {
for (key, group) in &rows.into_iter().chunk_by(|(_row, ts, _diff)| *ts) {
per_time.entry(key).or_default().extend(group);
}

View File

@@ -67,7 +67,7 @@ impl BloomFilterApplier {
for ((_, mut group), bloom) in locs
.iter()
.zip(start_seg..end_seg)
.group_by(|(x, _)| **x)
.chunk_by(|(x, _)| **x)
.into_iter()
.zip(bfs.iter())
{

View File

@@ -437,9 +437,9 @@ mod tests {
}
fn random_option_bytes(size: usize) -> Option<Vec<u8>> {
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
if rng.gen() {
if rng.random() {
let mut buffer = vec![0u8; size];
rng.fill(&mut buffer[..]);
Some(buffer)
@@ -469,11 +469,11 @@ mod tests {
segment_row_count: usize,
) -> (DictionaryValues, ValueSegIds) {
let mut n = row_count;
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
let mut dic_values = Vec::new();
while n > 0 {
let size = rng.gen_range(1..=n);
let size = rng.random_range(1..=n);
let value = random_option_bytes(100);
dic_values.push((value, size));
n -= size;

View File

@@ -535,7 +535,7 @@ mod tests {
.flatten()
.cloned()
.collect::<Vec<_>>();
all_entries.shuffle(&mut rand::thread_rng());
all_entries.shuffle(&mut rand::rng());
let response = logstore.append_batch(all_entries.clone()).await.unwrap();
// 5 region
@@ -575,7 +575,7 @@ mod tests {
warn!("The endpoints is empty, skipping the test 'test_append_batch_basic_large'");
return;
};
let data_size_kb = rand::thread_rng().gen_range(9..31usize);
let data_size_kb = rand::rng().random_range(9..31usize);
info!("Entry size: {}Ki", data_size_kb);
let broker_endpoints = broker_endpoints
.split(',')
@@ -608,7 +608,7 @@ mod tests {
.cloned()
.collect::<Vec<_>>();
assert_matches!(all_entries[0], Entry::MultiplePart(_));
all_entries.shuffle(&mut rand::thread_rng());
all_entries.shuffle(&mut rand::rng());
let response = logstore.append_batch(all_entries.clone()).await.unwrap();
// 5 region

View File

@@ -1058,11 +1058,11 @@ mod tests {
let tx = new_client("test_cluster_client").await;
let in_memory = tx.in_memory().unwrap();
let cluster_client = tx.client.cluster_client().unwrap();
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
// Generates rough 10MB data, which is larger than the default grpc message size limit.
for i in 0..10 {
let data: Vec<u8> = (0..1024 * 1024).map(|_| rng.gen()).collect();
let data: Vec<u8> = (0..1024 * 1024).map(|_| rng.random()).collect();
in_memory
.put(
PutRequest::new()

View File

@@ -75,7 +75,7 @@ impl AskLeader {
let leadership_group = self.leadership_group.read().unwrap();
leadership_group.peers.clone()
};
peers.shuffle(&mut rand::thread_rng());
peers.shuffle(&mut rand::rng());
let req = AskLeaderRequest {
header: Some(RequestHeader::new(

View File

@@ -22,8 +22,8 @@ where
return None;
}
let mut rng = rand::thread_rng();
let i = rng.gen_range(0..len);
let mut rng = rand::rng();
let i = rng.random_range(0..len);
func(i)
}

View File

@@ -36,7 +36,7 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: rand::distributions::WeightedError,
error: rand::distr::weighted::Error,
},
#[snafu(display("Exceeded deadline, operation: {}", operation))]

View File

@@ -543,11 +543,11 @@ pub(crate) mod tests {
assert!(rx.await.unwrap().is_empty());
fn generate_heartbeats(datanode_id: u64, region_ids: Vec<u32>) -> Vec<DatanodeHeartbeat> {
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
let start = current_time_millis();
(0..2000)
.map(|i| DatanodeHeartbeat {
timestamp: start + i * 1000 + rng.gen_range(0..100),
timestamp: start + i * 1000 + rng.random_range(0..100),
datanode_id,
regions: region_ids
.iter()

View File

@@ -61,7 +61,7 @@ impl Selector for RandomNodeSelector {
type Output = Vec<Peer>;
async fn select(&self, _ctx: &Self::Context, _opts: SelectorOptions) -> Result<Self::Output> {
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
let mut nodes = self.nodes.clone();
nodes.shuffle(&mut rng);
Ok(nodes)

View File

@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rand::seq::SliceRandom;
use rand::thread_rng;
use rand::rng;
use rand::seq::IndexedRandom;
use snafu::ResultExt;
use crate::error;
@@ -26,7 +26,10 @@ pub trait WeightedChoose<Item>: Send + Sync {
/// The method will choose multiple items.
///
/// Returns less than `amount` items if the weight_array is not enough.
/// ## Note
///
/// - Returns less than `amount` items if the weight_array is not enough.
/// - The returned items cannot be duplicated.
fn choose_multiple(&mut self, amount: usize) -> Result<Vec<Item>>;
/// Returns the length of the weight_array.
@@ -84,7 +87,7 @@ where
// unwrap safety: whether weighted_index is none has been checked before.
let item = self
.items
.choose_weighted(&mut thread_rng(), |item| item.weight as f64)
.choose_weighted(&mut rng(), |item| item.weight as f64)
.context(error::ChooseItemsSnafu)?
.item
.clone();
@@ -92,9 +95,11 @@ where
}
fn choose_multiple(&mut self, amount: usize) -> Result<Vec<Item>> {
let amount = amount.min(self.items.iter().filter(|item| item.weight > 0).count());
Ok(self
.items
.choose_multiple_weighted(&mut thread_rng(), amount, |item| item.weight as f64)
.choose_multiple_weighted(&mut rng(), amount, |item| item.weight as f64)
.context(error::ChooseItemsSnafu)?
.cloned()
.map(|item| item.item)
@@ -127,7 +132,7 @@ mod tests {
for _ in 0..100 {
let ret = choose.choose_multiple(3).unwrap();
assert_eq!(vec![1, 2], ret);
assert_eq!(vec![1], ret);
}
}
}

View File

@@ -28,7 +28,7 @@ use mito2::region::options::MergeMode;
use mito2::row_converter::DensePrimaryKeyCodec;
use mito2::test_util::memtable_util::{self, region_metadata_to_row_schema};
use rand::rngs::ThreadRng;
use rand::seq::SliceRandom;
use rand::seq::IndexedRandom;
use rand::Rng;
use store_api::metadata::{
ColumnMetadata, RegionMetadata, RegionMetadataBuilder, RegionMetadataRef,
@@ -161,8 +161,8 @@ struct Host {
impl Host {
fn random_with_id(id: usize) -> Host {
let mut rng = rand::thread_rng();
let region = format!("ap-southeast-{}", rng.gen_range(0..10));
let mut rng = rand::rng();
let region = format!("ap-southeast-{}", rng.random_range(0..10));
let datacenter = format!(
"{}{}",
region,
@@ -172,12 +172,12 @@ impl Host {
hostname: format!("host_{id}"),
region,
datacenter,
rack: rng.gen_range(0..100).to_string(),
rack: rng.random_range(0..100).to_string(),
os: "Ubuntu16.04LTS".to_string(),
arch: "x86".to_string(),
team: "CHI".to_string(),
service: rng.gen_range(0..100).to_string(),
service_version: rng.gen_range(0..10).to_string(),
service: rng.random_range(0..100).to_string(),
service_version: rng.random_range(0..10).to_string(),
service_environment: "test".to_string(),
}
}
@@ -254,7 +254,7 @@ impl CpuDataGenerator {
.hosts
.iter()
.map(|host| {
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
let mut values = Vec::with_capacity(21);
values.push(api::v1::Value {
value_data: Some(ValueData::TimestampMillisecondValue(current_sec * 1000)),
@@ -288,12 +288,12 @@ impl CpuDataGenerator {
}
fn random_hostname(&self) -> String {
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
self.hosts.choose(&mut rng).unwrap().hostname.clone()
}
fn random_f64(rng: &mut ThreadRng) -> f64 {
let base: u32 = rng.gen_range(30..95);
let base: u32 = rng.random_range(30..95);
base as f64
}

View File

@@ -146,14 +146,14 @@ mod test {
#[test]
fn fuzz_index_calculation() {
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
let mut data = vec![0u8; 1024 * 1024];
rng.fill_bytes(&mut data);
for _ in 0..FUZZ_REPEAT_TIMES {
let offset = rng.gen_range(0..data.len() as u64);
let size = rng.gen_range(0..data.len() as u32 - offset as u32);
let page_size: usize = rng.gen_range(1..1024);
let offset = rng.random_range(0..data.len() as u64);
let size = rng.random_range(0..data.len() as u32 - offset as u32);
let page_size: usize = rng.random_range(1..1024);
let indexes =
PageKey::generate_page_keys(offset, size, page_size as u64).collect::<Vec<_>>();

View File

@@ -146,14 +146,14 @@ mod test {
#[test]
fn fuzz_index_calculation() {
// randomly generate a large u8 array
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
let mut data = vec![0u8; 1024 * 1024];
rng.fill_bytes(&mut data);
for _ in 0..FUZZ_REPEAT_TIMES {
let offset = rng.gen_range(0..data.len() as u64);
let size = rng.gen_range(0..data.len() as u32 - offset as u32);
let page_size: usize = rng.gen_range(1..1024);
let offset = rng.random_range(0..data.len() as u64);
let size = rng.random_range(0..data.len() as u32 - offset as u32);
let page_size: usize = rng.random_range(1..1024);
let indexes =
PageKey::generate_page_keys(offset, size, page_size as u64).collect::<Vec<_>>();
@@ -357,10 +357,10 @@ mod test {
);
// fuzz test
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
for _ in 0..FUZZ_REPEAT_TIMES {
let offset = rng.gen_range(0..file_size);
let size = rng.gen_range(0..file_size as u32 - offset as u32);
let offset = rng.random_range(0..file_size);
let size = rng.random_range(0..file_size as u32 - offset as u32);
let expected = cached_reader.range_read(offset, size).await.unwrap();
let inner = &cached_reader.inner;
let read = cached_reader

View File

@@ -389,10 +389,10 @@ mod tests {
fn prepare_input_keys(num_keys: usize) -> Vec<Vec<u8>> {
let prefix = ["a", "b", "c", "d", "e", "f"];
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
let mut keys = Vec::with_capacity(num_keys);
for i in 0..num_keys {
let prefix_idx = rng.gen_range(0..prefix.len());
let prefix_idx = rng.random_range(0..prefix.len());
// We don't need to decode the primary key in index's test so we format the string
// into the key.
let key = format!("{}{}", prefix[prefix_idx], i);

View File

@@ -509,7 +509,7 @@ impl ParquetReaderBuilder {
(row_group_id, rg_begin_row_id..rg_end_row_id)
})
.group_by(|(row_group_id, _)| *row_group_id);
.chunk_by(|(row_group_id, _)| *row_group_id);
let ranges_in_row_groups = grouped_in_row_groups
.into_iter()

View File

@@ -38,7 +38,7 @@ use common_telemetry::{error, info, warn};
use futures::future::try_join_all;
use object_store::manager::ObjectStoreManagerRef;
use prometheus::IntGauge;
use rand::{thread_rng, Rng};
use rand::{rng, Rng};
use snafu::{ensure, ResultExt};
use store_api::logstore::LogStore;
use store_api::region_engine::{SetRegionRoleStateResponse, SettableRegionRoleState};
@@ -390,7 +390,7 @@ async fn write_cache_from_config(
/// Computes an initial check delay for a worker.
pub(crate) fn worker_init_check_delay() -> Duration {
let init_check_delay = thread_rng().gen_range(0..MAX_INITIAL_CHECK_DELAY_SECS);
let init_check_delay = rng().random_range(0..MAX_INITIAL_CHECK_DELAY_SECS);
Duration::from_secs(init_check_delay)
}

View File

@@ -28,7 +28,7 @@ use crate::QueryEngineRef;
pub fn create_query_engine_for_vector10x3() -> QueryEngineRef {
let mut column_schemas = vec![];
let mut columns = vec![];
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
let column_name = "vector";
let column_schema = ColumnSchema::new(column_name, ConcreteDataType::binary_datatype(), true);
@@ -37,9 +37,9 @@ pub fn create_query_engine_for_vector10x3() -> QueryEngineRef {
let vectors = (0..10)
.map(|_| {
let veclit = [
rng.gen_range(-100f32..100.0),
rng.gen_range(-100f32..100.0),
rng.gen_range(-100f32..100.0),
rng.random_range(-100f32..100.0),
rng.random_range(-100f32..100.0),
rng.random_range(-100f32..100.0),
];
veclit_to_binlit(&veclit)
})

View File

@@ -92,7 +92,7 @@ impl MysqlInstanceShim {
) -> MysqlInstanceShim {
// init a random salt
let mut bs = vec![0u8; 20];
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
rng.fill_bytes(bs.as_mut());
let mut scramble: [u8; 20] = [0; 20];

View File

@@ -26,7 +26,6 @@ use datatypes::schema::{ColumnSchema, Schema};
use datatypes::value::Value;
use mysql_async::prelude::*;
use mysql_async::{Conn, Row, SslOpts};
use rand::rngs::StdRng;
use rand::Rng;
use servers::error::Result;
use servers::install_ring_crypto_provider;
@@ -426,13 +425,11 @@ async fn test_query_concurrently() -> Result<()> {
let mut join_handles = vec![];
for _ in 0..threads {
join_handles.push(tokio::spawn(async move {
let mut rand: StdRng = rand::SeedableRng::from_entropy();
let mut connection = create_connection_default_db_name(server_port, false)
.await
.unwrap();
for _ in 0..expect_executed_queries_per_worker {
let expected: u32 = rand.gen_range(0..100);
let expected: u32 = rand::rng().random_range(0..100);
let result: u32 = connection
.query_first(format!(
"SELECT uint32s FROM numbers WHERE uint32s = {expected}"

View File

@@ -22,7 +22,6 @@ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_runtime::runtime::BuilderBuild;
use common_runtime::Builder as RuntimeBuilder;
use pgwire::api::Type;
use rand::rngs::StdRng;
use rand::Rng;
use rustls::client::danger::{ServerCertVerified, ServerCertVerifier};
use rustls::{Error, SignatureScheme};
@@ -202,12 +201,10 @@ async fn test_query_pg_concurrently() -> Result<()> {
let mut join_handles = vec![];
for _i in 0..threads {
join_handles.push(tokio::spawn(async move {
let mut rand: StdRng = rand::SeedableRng::from_entropy();
let mut client = create_plain_connection(server_port, false).await.unwrap();
for _k in 0..expect_executed_queries_per_worker {
let expected: u32 = rand.gen_range(0..100);
let expected: u32 = rand::rng().random_range(0..100);
let result: u32 = unwrap_results(
client
.simple_query(&format!(