chore: update rust to nightly 2025-10-01 (#7069)

* chore: update rust to nightly 2025-10-01

Signed-off-by: luofucong <luofc@foxmail.com>

* chore: nix update

---------

Signed-off-by: luofucong <luofc@foxmail.com>
Co-authored-by: Ning Sun <sunning@greptime.com>
This commit is contained in:
LFC
2025-10-11 15:30:52 +08:00
committed by GitHub
parent 40e9ce90a7
commit 8fe17d43d5
217 changed files with 523 additions and 647 deletions

View File

@@ -89,10 +89,7 @@ mod test {
let runtime = common_runtime::global_runtime().clone();
let greptime_request_handler = GreptimeRequestHandler::new(
ServerGrpcQueryHandlerAdapter::arc(db.frontend.instance.clone()),
user_provider_from_option(
&"static_user_provider:cmd:greptime_user=greptime_pwd".to_string(),
)
.ok(),
user_provider_from_option("static_user_provider:cmd:greptime_user=greptime_pwd").ok(),
Some(runtime.clone()),
FlightCompression::default(),
);

View File

@@ -12,126 +12,126 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use futures_util::future::BoxFuture;
use http::Uri;
use hyper_util::rt::TokioIo;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
use tokio::sync::mpsc;
use tower::Service;
/// A `tower` connector that establishes plain TCP connections and wraps each
/// one in a [`CollectGrpcResponseFrameTypeStream`] so that observed HTTP/2
/// frame types can be reported to the party holding the receiver side of
/// `interested_tx`.
struct NetworkTrafficMonitorableConnector {
    // Sender half of the channel over which decoded frame-type names are
    // forwarded; the receiver is held by the interested test code.
    interested_tx: mpsc::Sender<String>,
}
impl Service<Uri> for NetworkTrafficMonitorableConnector {
    type Response = TokioIo<CollectGrpcResponseFrameTypeStream>;
    type Error = String;
    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

    /// The connector holds no resources to wait on, so it is always ready.
    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    /// Opens a TCP connection to the URI's host/port and wraps the stream so
    /// the HTTP/2 response frame types flowing over it can be observed.
    fn call(&mut self, uri: Uri) -> Self::Future {
        let frame_types = self.interested_tx.clone();
        Box::pin(async move {
            // Fall back to "localhost:4001" when the URI omits host or port
            // (presumably the default test gRPC endpoint — confirm at callers).
            let host = uri.host().unwrap_or("localhost");
            let port = uri.port_u16().unwrap_or(4001);
            let stream = TcpStream::connect(format!("{host}:{port}"))
                .await
                .map_err(|e| e.to_string())?;
            Ok(TokioIo::new(CollectGrpcResponseFrameTypeStream {
                inner: stream,
                frame_types,
            }))
        })
    }
}
/// A `TcpStream` wrapper that inspects bytes as they are read and forwards
/// the names of decoded HTTP/2 frame types over `frame_types`. Writes pass
/// through untouched.
struct CollectGrpcResponseFrameTypeStream {
    // The real connection all I/O is delegated to.
    inner: TcpStream,
    // Sender half of the channel carrying decoded frame-type names.
    frame_types: mpsc::Sender<String>,
}
impl AsyncRead for CollectGrpcResponseFrameTypeStream {
    /// Delegates the read to the inner `TcpStream`, then inspects only the
    /// bytes this poll produced and, if they start with what looks like an
    /// HTTP/2 frame header, forwards the frame-type name over `frame_types`.
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // Remember how much of the buffer was already filled so we can slice
        // out just the data produced by this poll.
        let before_len = buf.filled().len();
        let result = Pin::new(&mut self.inner).poll_read(cx, buf);
        if let Poll::Ready(Ok(())) = &result {
            let new_data = &buf.filled()[before_len..];
            // Best-effort: decoding only succeeds when the chunk happens to
            // begin on an HTTP/2 frame boundary.
            if let Some(frame_type) = maybe_decode_frame_type(new_data)
                // Idiom fix: `.is_err()` instead of `if let Err(_) = …`
                // (clippy::redundant_pattern_matching).
                && self.frame_types.try_send(frame_type.to_string()).is_err()
            {
                // try_send fails when the receiver is gone (or the channel is
                // full); surface it as a read error so the test fails fast.
                return Poll::Ready(Err(io::Error::other("interested party has gone")));
            }
        }
        result
    }
}
/// Best-effort decoder for the HTTP/2 frame type of a raw byte chunk.
///
/// A frame header is 9 bytes (3-byte length, 1-byte type, 1-byte flags,
/// 4-byte stream id), so the type is the byte at offset 3. Returns `None`
/// when the chunk is too short to hold a full frame header; only meaningful
/// when the chunk starts on a frame boundary.
fn maybe_decode_frame_type(data: &[u8]) -> Option<&str> {
    if data.len() < 9 {
        return None;
    }
    let frame_type = match data[3] {
        0x0 => "DATA",
        0x1 => "HEADERS",
        0x2 => "PRIORITY",
        0x3 => "RST_STREAM",
        0x4 => "SETTINGS",
        0x5 => "PUSH_PROMISE",
        0x6 => "PING",
        0x7 => "GOAWAY",
        0x8 => "WINDOW_UPDATE",
        0x9 => "CONTINUATION",
        _ => "UNKNOWN",
    };
    Some(frame_type)
}
// Outgoing traffic is not inspected: every AsyncWrite operation delegates
// directly to the wrapped TcpStream.
impl AsyncWrite for CollectGrpcResponseFrameTypeStream {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, io::Error>> {
        Pin::new(&mut self.inner).poll_write(cx, buf)
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        Pin::new(&mut self.inner).poll_flush(cx)
    }

    fn poll_shutdown(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), io::Error>> {
        Pin::new(&mut self.inner).poll_shutdown(cx)
    }
}
#[cfg(test)]
mod tests {
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use client::Client;
use common_grpc::channel_manager::ChannelManager;
use futures_util::future::BoxFuture;
use http::Uri;
use hyper_util::rt::TokioIo;
use servers::grpc::GrpcServerConfig;
use servers::server::Server;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
use tokio::sync::mpsc;
use tower::Service;
use super::*;
use crate::test_util::{StorageType, setup_grpc_server_with};
/// A `tower` connector that establishes plain TCP connections and wraps each
/// one in a [`CollectGrpcResponseFrameTypeStream`] so that observed HTTP/2
/// frame types can be reported to the party holding the receiver side of
/// `interested_tx`.
struct NetworkTrafficMonitorableConnector {
    // Sender half of the channel over which decoded frame-type names are
    // forwarded; the receiver is held by the interested test code.
    interested_tx: mpsc::Sender<String>,
}
impl Service<Uri> for NetworkTrafficMonitorableConnector {
    type Response = TokioIo<CollectGrpcResponseFrameTypeStream>;
    type Error = String;
    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

    // The connector holds no resources to wait on, so it is always ready.
    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    // Opens a TCP connection to the URI's host/port (falling back to
    // "localhost:4001" — presumably the default test gRPC endpoint; confirm
    // at callers) and wraps the stream so response frame types can be
    // observed.
    fn call(&mut self, uri: Uri) -> Self::Future {
        let frame_types = self.interested_tx.clone();
        Box::pin(async move {
            let addr = format!(
                "{}:{}",
                uri.host().unwrap_or("localhost"),
                uri.port_u16().unwrap_or(4001),
            );
            let inner = TcpStream::connect(addr).await.map_err(|e| e.to_string())?;
            Ok(TokioIo::new(CollectGrpcResponseFrameTypeStream {
                inner,
                frame_types,
            }))
        })
    }
}
/// A `TcpStream` wrapper that inspects bytes as they are read and forwards
/// the names of decoded HTTP/2 frame types over `frame_types`. Writes pass
/// through untouched.
struct CollectGrpcResponseFrameTypeStream {
    // The real connection all I/O is delegated to.
    inner: TcpStream,
    // Sender half of the channel carrying decoded frame-type names.
    frame_types: mpsc::Sender<String>,
}
impl AsyncRead for CollectGrpcResponseFrameTypeStream {
    /// Delegates the read to the inner `TcpStream`, then inspects only the
    /// bytes this poll produced and, if they start with what looks like an
    /// HTTP/2 frame header, forwards the frame-type name over `frame_types`.
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // Remember how much of the buffer was already filled so we can slice
        // out just the data produced by this poll.
        let before_len = buf.filled().len();
        let result = Pin::new(&mut self.inner).poll_read(cx, buf);
        if let Poll::Ready(Ok(())) = &result {
            let new_data = &buf.filled()[before_len..];
            // Best-effort: decoding only succeeds when the chunk happens to
            // begin on an HTTP/2 frame boundary.
            if let Some(frame_type) = maybe_decode_frame_type(new_data)
                // Idiom fix: `.is_err()` instead of `if let Err(_) = …`
                // (clippy::redundant_pattern_matching).
                && self.frame_types.try_send(frame_type.to_string()).is_err()
            {
                // try_send fails when the receiver is gone (or the channel is
                // full); surface it as a read error so the test fails fast.
                return Poll::Ready(Err(io::Error::other("interested party has gone")));
            }
        }
        result
    }
}
/// Best-effort decoder for the HTTP/2 frame type of a raw byte chunk.
///
/// A frame header is 9 bytes (3-byte length, 1-byte type, 1-byte flags,
/// 4-byte stream id), so the type is the byte at offset 3. Returns `None`
/// when the chunk is too short to hold a full frame header; only meaningful
/// when the chunk starts on a frame boundary.
fn maybe_decode_frame_type(data: &[u8]) -> Option<&str> {
    if data.len() < 9 {
        return None;
    }
    let frame_type = match data[3] {
        0x0 => "DATA",
        0x1 => "HEADERS",
        0x2 => "PRIORITY",
        0x3 => "RST_STREAM",
        0x4 => "SETTINGS",
        0x5 => "PUSH_PROMISE",
        0x6 => "PING",
        0x7 => "GOAWAY",
        0x8 => "WINDOW_UPDATE",
        0x9 => "CONTINUATION",
        _ => "UNKNOWN",
    };
    Some(frame_type)
}
// Outgoing traffic is not inspected: every AsyncWrite operation delegates
// directly to the wrapped TcpStream.
impl AsyncWrite for CollectGrpcResponseFrameTypeStream {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, io::Error>> {
        Pin::new(&mut self.inner).poll_write(cx, buf)
    }

    fn poll_flush(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), io::Error>> {
        Pin::new(&mut self.inner).poll_flush(cx)
    }

    fn poll_shutdown(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), io::Error>> {
        Pin::new(&mut self.inner).poll_shutdown(cx)
    }
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_grpc_max_connection_age() {
let config = GrpcServerConfig {

View File

@@ -275,7 +275,7 @@ pub(crate) async fn standalone_with_kafka_wal() -> Option<Box<dyn RebuildableMoc
..Default::default()
},
kafka_topic: KafkaTopicConfig {
topic_name_prefix: test_name.to_string(),
topic_name_prefix: test_name.clone(),
num_topics: 3,
..Default::default()
},
@@ -314,7 +314,7 @@ pub(crate) async fn distributed_with_kafka_wal() -> Option<Box<dyn RebuildableMo
..Default::default()
},
kafka_topic: KafkaTopicConfig {
topic_name_prefix: test_name.to_string(),
topic_name_prefix: test_name.clone(),
num_topics: 3,
..Default::default()
},

View File

@@ -229,10 +229,8 @@ pub async fn test_grpc_message_size_limit_recv(store_type: StorageType) {
}
pub async fn test_grpc_auth(store_type: StorageType) {
let user_provider = user_provider_from_option(
&"static_user_provider:cmd:greptime_user=greptime_pwd".to_string(),
)
.unwrap();
let user_provider =
user_provider_from_option("static_user_provider:cmd:greptime_user=greptime_pwd").unwrap();
let (_db, fe_grpc_server) =
setup_grpc_server_with_user_provider(store_type, "auto_create_table", Some(user_provider))
.await;
@@ -282,10 +280,8 @@ pub async fn test_grpc_auth(store_type: StorageType) {
}
pub async fn test_otel_arrow_auth(store_type: StorageType) {
let user_provider = user_provider_from_option(
&"static_user_provider:cmd:greptime_user=greptime_pwd".to_string(),
)
.unwrap();
let user_provider =
user_provider_from_option("static_user_provider:cmd:greptime_user=greptime_pwd").unwrap();
let (_db, fe_grpc_server) = setup_grpc_server_with_user_provider(
store_type,
"test_otel_arrow_auth",

View File

@@ -150,7 +150,7 @@ pub async fn test_http_auth(store_type: StorageType) {
common_telemetry::init_default_ut_logging();
let user_provider = user_provider_from_option(
&"static_user_provider:cmd:greptime_user=greptime_pwd,readonly_user:ro=readonly_pwd,writeonly_user:wo=writeonly_pwd".to_string(),
"static_user_provider:cmd:greptime_user=greptime_pwd,readonly_user:ro=readonly_pwd,writeonly_user:wo=writeonly_pwd",
)
.unwrap();

View File

@@ -88,7 +88,7 @@ macro_rules! sql_tests {
pub async fn test_mysql_auth(store_type: StorageType) {
let user_provider = user_provider_from_option(
&"static_user_provider:cmd:greptime_user=greptime_pwd,readonly_user:ro=readonly_pwd,writeonly_user:wo=writeonly_pwd".to_string(),
"static_user_provider:cmd:greptime_user=greptime_pwd,readonly_user:ro=readonly_pwd,writeonly_user:wo=writeonly_pwd",
)
.unwrap();
@@ -463,10 +463,8 @@ pub async fn test_mysql_timezone(store_type: StorageType) {
}
pub async fn test_postgres_auth(store_type: StorageType) {
let user_provider = user_provider_from_option(
&"static_user_provider:cmd:greptime_user=greptime_pwd".to_string(),
)
.unwrap();
let user_provider =
user_provider_from_option("static_user_provider:cmd:greptime_user=greptime_pwd").unwrap();
let (mut guard, fe_pg_server) =
setup_pg_server_with_user_provider(store_type, "sql_crud", Some(user_provider)).await;
@@ -1185,7 +1183,7 @@ pub async fn test_mysql_async_timestamp(store_type: StorageType) {
.await
.expect("create table failure");
let metrics = vec![
let metrics = [
CpuMetric::new(
"host0".into(),
"test".into(),