Skip to main content

meta_client/
client.rs

1// Copyright 2023 Greptime Team
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15mod ask_leader;
16mod config;
17pub mod heartbeat;
18mod load_balance;
19mod procedure;
20
21mod cluster;
22mod store;
23mod util;
24
25use std::fmt::Debug;
26use std::sync::Arc;
27use std::time::Duration;
28
29use api::v1::meta::heartbeat_request::NodeWorkloads;
30use api::v1::meta::{
31    MetasrvNodeInfo, ProcedureDetailResponse, ReconcileRequest, ReconcileResponse, Role,
32};
33pub use ask_leader::{AskLeader, LeaderProvider, LeaderProviderRef};
34use cluster::Client as ClusterClient;
35pub use cluster::ClusterKvBackend;
36use common_error::ext::BoxedError;
37use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
38use common_meta::cluster::{
39    ClusterInfo, MetasrvStatus, NodeInfo, NodeInfoKey, NodeStatus, Role as ClusterRole,
40};
41use common_meta::datanode::{DatanodeStatKey, DatanodeStatValue, RegionStat};
42use common_meta::distributed_time_constants::default_distributed_time_constants;
43use common_meta::error::{
44    self as meta_error, ExternalSnafu, Result as MetaResult, UnsupportedSnafu,
45};
46use common_meta::key::flow::flow_state::{FlowStat, FlowStateManager};
47use common_meta::kv_backend::KvBackendRef;
48use common_meta::peer::{Peer, PeerDiscovery};
49use common_meta::procedure_executor::{ExecutorContext, ProcedureExecutor};
50use common_meta::range_stream::PaginationStream;
51use common_meta::rpc::KeyValue;
52use common_meta::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
53use common_meta::rpc::procedure::{
54    AddRegionFollowerRequest, AddTableFollowerRequest, GcRegionsRequest, GcResponse,
55    GcTableRequest, ManageRegionFollowerRequest, MigrateRegionRequest, MigrateRegionResponse,
56    ProcedureStateResponse, RemoveRegionFollowerRequest, RemoveTableFollowerRequest,
57};
58use common_meta::rpc::store::{
59    BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
60    BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
61    DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
62};
63use common_options::plugin_options::PluginOptionsDeserializer;
64use common_telemetry::info;
65use common_time::util::DefaultSystemTimer;
66use config::Client as ConfigClient;
67use futures::TryStreamExt;
68use heartbeat::{Client as HeartbeatClient, HeartbeatConfig};
69use procedure::Client as ProcedureClient;
70use serde::de::DeserializeOwned;
71use snafu::{OptionExt, ResultExt};
72use store::Client as StoreClient;
73
74pub use self::heartbeat::{HeartbeatSender, HeartbeatStream};
75use crate::client::ask_leader::{LeaderProviderFactoryImpl, LeaderProviderFactoryRef};
76use crate::error::{
77    ConvertMetaConfigSnafu, ConvertMetaRequestSnafu, ConvertMetaResponseSnafu, Error,
78    GetFlowStatSnafu, NotStartedSnafu, Result,
79};
80
/// The member id a meta client identifies itself with when talking to Metasrv.
pub type Id = u64;

/// Max retries when asking Metasrv which node is the current leader.
const DEFAULT_ASK_LEADER_MAX_RETRY: usize = 3;
/// Max retries when submitting a DDL task through the procedure client.
const DEFAULT_SUBMIT_DDL_MAX_RETRY: usize = 3;
/// Max retries for cluster-info RPCs issued by the cluster client.
const DEFAULT_CLUSTER_CLIENT_MAX_RETRY: usize = 3;
/// Default per-request timeout for DDL submissions (overridable via
/// [`MetaClientBuilder::ddl_timeout`]).
const DEFAULT_DDL_TIMEOUT: Duration = Duration::from_secs(10);
87
/// Builder for [`MetaClient`]: selects which sub-clients (heartbeat, store,
/// procedure, cluster-info) are enabled and which channel managers they use.
#[derive(Clone, Debug, Default)]
pub struct MetaClientBuilder {
    // Member id reported to Metasrv.
    id: Id,
    // The role (frontend/datanode/flownode) this client acts as.
    role: Role,
    enable_heartbeat: bool,
    enable_store: bool,
    // Test-only: allow direct write RPCs on the store client.
    #[cfg(test)]
    enable_direct_store_writes: bool,
    enable_procedure: bool,
    enable_access_cluster_info: bool,
    // Optional client used to manage region followers.
    region_follower: Option<RegionFollowerClientRef>,
    // General-purpose channel manager; defaults when not set.
    channel_manager: Option<ChannelManager>,
    // Dedicated channel manager for DDL requests; falls back to `channel_manager`.
    ddl_channel_manager: Option<ChannelManager>,
    /// The default ddl timeout for each request.
    ddl_timeout: Option<Duration>,
    // Dedicated channel manager for heartbeat streams; falls back to `channel_manager`.
    heartbeat_channel_manager: Option<ChannelManager>,
}
105
106impl MetaClientBuilder {
107    pub fn new(member_id: u64, role: Role) -> Self {
108        Self {
109            id: member_id,
110            role,
111            ..Default::default()
112        }
113    }
114
115    /// Returns the role of Frontend's default options.
116    pub fn frontend_default_options() -> Self {
117        // Frontend does not need a member id.
118        Self::new(0, Role::Frontend)
119            .enable_store()
120            .enable_heartbeat()
121            .enable_procedure()
122            .enable_access_cluster_info()
123    }
124
125    /// Returns the role of Datanode's default options.
126    pub fn datanode_default_options(member_id: u64) -> Self {
127        Self::new(member_id, Role::Datanode)
128            .enable_store()
129            .enable_heartbeat()
130    }
131
132    /// Returns the role of Flownode's default options.
133    pub fn flownode_default_options(member_id: u64) -> Self {
134        Self::new(member_id, Role::Flownode)
135            .enable_store()
136            .enable_heartbeat()
137            .enable_procedure()
138            .enable_access_cluster_info()
139    }
140
141    pub fn enable_heartbeat(self) -> Self {
142        Self {
143            enable_heartbeat: true,
144            ..self
145        }
146    }
147
148    /// Enables the Store client in read-only mode.
149    ///
150    /// Store write methods fail fast by default. Metadata writes from production
151    /// frontend/datanode/flownode clients should go through metasrv procedures.
152    pub fn enable_store(self) -> Self {
153        Self {
154            enable_store: true,
155            ..self
156        }
157    }
158
159    /// Enables direct Store write RPCs for tests.
160    ///
161    /// Production metadata writes should use metasrv-owned write paths instead.
162    #[cfg(test)]
163    pub(super) fn enable_direct_store_writes_for_test(self) -> Self {
164        Self {
165            enable_store: true,
166            enable_direct_store_writes: true,
167            ..self
168        }
169    }
170
171    pub fn enable_procedure(self) -> Self {
172        Self {
173            enable_procedure: true,
174            ..self
175        }
176    }
177
178    pub fn enable_access_cluster_info(self) -> Self {
179        Self {
180            enable_access_cluster_info: true,
181            ..self
182        }
183    }
184
185    pub fn channel_manager(self, channel_manager: ChannelManager) -> Self {
186        Self {
187            channel_manager: Some(channel_manager),
188            ..self
189        }
190    }
191
192    pub fn ddl_channel_manager(self, channel_manager: ChannelManager) -> Self {
193        Self {
194            ddl_channel_manager: Some(channel_manager),
195            ..self
196        }
197    }
198
199    pub fn ddl_timeout(self, timeout: Duration) -> Self {
200        Self {
201            ddl_timeout: Some(timeout),
202            ..self
203        }
204    }
205
206    pub fn heartbeat_channel_manager(self, channel_manager: ChannelManager) -> Self {
207        Self {
208            heartbeat_channel_manager: Some(channel_manager),
209            ..self
210        }
211    }
212
213    pub fn with_region_follower(self, region_follower: RegionFollowerClientRef) -> Self {
214        Self {
215            region_follower: Some(region_follower),
216            ..self
217        }
218    }
219
220    pub fn build(self) -> MetaClient {
221        let mgr = self.channel_manager.unwrap_or_default();
222        let heartbeat_channel_manager = self
223            .heartbeat_channel_manager
224            .clone()
225            .unwrap_or_else(|| mgr.clone());
226
227        let heartbeat = self.enable_heartbeat.then(|| {
228            if self.heartbeat_channel_manager.is_some() {
229                info!("Enable heartbeat channel using the heartbeat channel manager.");
230            }
231
232            HeartbeatClient::new(self.id, self.role, heartbeat_channel_manager.clone())
233        });
234        let config = self
235            .enable_heartbeat
236            .then(|| ConfigClient::new(self.id, self.role, mgr.clone()));
237        let store = self.enable_store.then(|| {
238            #[cfg(test)]
239            {
240                if self.enable_direct_store_writes {
241                    return StoreClient::new_writable(self.id, self.role, mgr.clone());
242                }
243            }
244
245            StoreClient::new(self.id, self.role, mgr.clone())
246        });
247        let procedure = self.enable_procedure.then(|| {
248            let mgr = self.ddl_channel_manager.unwrap_or(mgr.clone());
249            ProcedureClient::new(
250                self.id,
251                self.role,
252                mgr,
253                DEFAULT_SUBMIT_DDL_MAX_RETRY,
254                self.ddl_timeout.unwrap_or(DEFAULT_DDL_TIMEOUT),
255            )
256        });
257        let cluster = self
258            .enable_access_cluster_info
259            .then(|| ClusterClient::new(mgr.clone(), DEFAULT_CLUSTER_CLIENT_MAX_RETRY));
260        let region_follower = self.region_follower.clone();
261
262        MetaClient {
263            id: self.id,
264            channel_manager: mgr.clone(),
265            leader_provider_factory: Arc::new(LeaderProviderFactoryImpl::new(
266                self.id,
267                self.role,
268                DEFAULT_ASK_LEADER_MAX_RETRY,
269                heartbeat_channel_manager,
270            )),
271            heartbeat,
272            config,
273            store,
274            procedure,
275            cluster,
276            region_follower,
277        }
278    }
279}
280
/// Client-side handle to Metasrv, aggregating the optional sub-clients
/// enabled via [`MetaClientBuilder`]; a sub-client left as `None` makes the
/// corresponding accessor return a `NotStarted` error.
#[derive(Debug)]
pub struct MetaClient {
    // The member id this client identifies itself with.
    id: Id,
    channel_manager: ChannelManager,
    // Factory creating the leader provider used by the sub-clients at start-up.
    leader_provider_factory: LeaderProviderFactoryRef,
    heartbeat: Option<HeartbeatClient>,
    config: Option<ConfigClient>,
    store: Option<StoreClient>,
    procedure: Option<ProcedureClient>,
    cluster: Option<ClusterClient>,
    region_follower: Option<RegionFollowerClientRef>,
}
293
294impl MetaClient {
295    pub fn new(id: Id, role: Role) -> Self {
296        Self {
297            id,
298            channel_manager: ChannelManager::default(),
299            leader_provider_factory: Arc::new(LeaderProviderFactoryImpl::new(
300                id,
301                role,
302                DEFAULT_ASK_LEADER_MAX_RETRY,
303                ChannelManager::default(),
304            )),
305            heartbeat: None,
306            config: None,
307            store: None,
308            procedure: None,
309            cluster: None,
310            region_follower: None,
311        }
312    }
313}
314
/// Shared, thread-safe reference to a [`RegionFollowerClient`].
pub type RegionFollowerClientRef = Arc<dyn RegionFollowerClient>;

/// A trait for clients that can manage region followers.
#[async_trait::async_trait]
pub trait RegionFollowerClient: Sync + Send + Debug {
    /// Adds a follower to a single region.
    async fn add_region_follower(&self, request: AddRegionFollowerRequest) -> Result<()>;

    /// Removes a follower from a single region.
    async fn remove_region_follower(&self, request: RemoveRegionFollowerRequest) -> Result<()>;

    /// Adds followers for a table.
    async fn add_table_follower(&self, request: AddTableFollowerRequest) -> Result<()>;

    /// Removes followers for a table.
    async fn remove_table_follower(&self, request: RemoveTableFollowerRequest) -> Result<()>;

    /// Starts the client against the given Metasrv urls.
    async fn start(&self, urls: &[&str]) -> Result<()>;

    /// Starts the client with an externally supplied leader provider.
    async fn start_with(&self, leader_provider: LeaderProviderRef) -> Result<()>;
}
332
333#[async_trait::async_trait]
334impl ProcedureExecutor for MetaClient {
335    async fn submit_ddl_task(
336        &self,
337        _ctx: &ExecutorContext,
338        request: SubmitDdlTaskRequest,
339    ) -> MetaResult<SubmitDdlTaskResponse> {
340        self.submit_ddl_task(request)
341            .await
342            .map_err(BoxedError::new)
343            .context(meta_error::ExternalSnafu)
344    }
345
346    async fn migrate_region(
347        &self,
348        _ctx: &ExecutorContext,
349        request: MigrateRegionRequest,
350    ) -> MetaResult<MigrateRegionResponse> {
351        self.migrate_region(request)
352            .await
353            .map_err(BoxedError::new)
354            .context(meta_error::ExternalSnafu)
355    }
356
357    async fn reconcile(
358        &self,
359        _ctx: &ExecutorContext,
360        request: ReconcileRequest,
361    ) -> MetaResult<ReconcileResponse> {
362        self.reconcile(request)
363            .await
364            .map_err(BoxedError::new)
365            .context(meta_error::ExternalSnafu)
366    }
367
368    async fn manage_region_follower(
369        &self,
370        _ctx: &ExecutorContext,
371        request: ManageRegionFollowerRequest,
372    ) -> MetaResult<()> {
373        if let Some(region_follower) = &self.region_follower {
374            match request {
375                ManageRegionFollowerRequest::AddRegionFollower(add_region_follower_request) => {
376                    region_follower
377                        .add_region_follower(add_region_follower_request)
378                        .await
379                }
380                ManageRegionFollowerRequest::RemoveRegionFollower(
381                    remove_region_follower_request,
382                ) => {
383                    region_follower
384                        .remove_region_follower(remove_region_follower_request)
385                        .await
386                }
387                ManageRegionFollowerRequest::AddTableFollower(add_table_follower_request) => {
388                    region_follower
389                        .add_table_follower(add_table_follower_request)
390                        .await
391                }
392                ManageRegionFollowerRequest::RemoveTableFollower(remove_table_follower_request) => {
393                    region_follower
394                        .remove_table_follower(remove_table_follower_request)
395                        .await
396                }
397            }
398            .map_err(BoxedError::new)
399            .context(meta_error::ExternalSnafu)
400        } else {
401            UnsupportedSnafu {
402                operation: "manage_region_follower",
403            }
404            .fail()
405        }
406    }
407
408    async fn query_procedure_state(
409        &self,
410        _ctx: &ExecutorContext,
411        pid: &str,
412    ) -> MetaResult<ProcedureStateResponse> {
413        self.query_procedure_state(pid)
414            .await
415            .map_err(BoxedError::new)
416            .context(meta_error::ExternalSnafu)
417    }
418
419    async fn gc_regions(
420        &self,
421        _ctx: &ExecutorContext,
422        request: GcRegionsRequest,
423    ) -> MetaResult<GcResponse> {
424        self.gc_regions(request)
425            .await
426            .map_err(BoxedError::new)
427            .context(meta_error::ExternalSnafu)
428    }
429
430    async fn gc_table(
431        &self,
432        _ctx: &ExecutorContext,
433        request: GcTableRequest,
434    ) -> MetaResult<GcResponse> {
435        self.gc_table(request)
436            .await
437            .map_err(BoxedError::new)
438            .context(meta_error::ExternalSnafu)
439    }
440
441    async fn list_procedures(&self, _ctx: &ExecutorContext) -> MetaResult<ProcedureDetailResponse> {
442        self.procedure_client()
443            .map_err(BoxedError::new)
444            .context(meta_error::ExternalSnafu)?
445            .list_procedures()
446            .await
447            .map_err(BoxedError::new)
448            .context(meta_error::ExternalSnafu)
449    }
450}
451
452// TODO(zyy17): Allow deprecated fields for backward compatibility. Remove this when the deprecated fields are removed from the proto.
453#[allow(deprecated)]
454#[async_trait::async_trait]
455impl ClusterInfo for MetaClient {
456    type Error = Error;
457
458    async fn list_nodes(&self, role: Option<ClusterRole>) -> Result<Vec<NodeInfo>> {
459        let cluster_client = self.cluster_client()?;
460
461        let (get_metasrv_nodes, nodes_key_prefix) = match role {
462            None => (true, Some(NodeInfoKey::key_prefix())),
463            Some(ClusterRole::Metasrv) => (true, None),
464            Some(role) => (false, Some(NodeInfoKey::key_prefix_with_role(role))),
465        };
466
467        let mut nodes = if get_metasrv_nodes {
468            let last_activity_ts = -1; // Metasrv does not provide this information.
469
470            let (leader, followers): (Option<MetasrvNodeInfo>, Vec<MetasrvNodeInfo>) =
471                cluster_client.get_metasrv_peers().await?;
472            followers
473                .into_iter()
474                .map(|node| {
475                    if let Some(node_info) = node.info {
476                        NodeInfo {
477                            peer: node.peer.unwrap_or_default(),
478                            last_activity_ts,
479                            status: NodeStatus::Metasrv(MetasrvStatus { is_leader: false }),
480                            version: node_info.version,
481                            git_commit: node_info.git_commit,
482                            start_time_ms: node_info.start_time_ms,
483                            total_cpu_millicores: node_info.total_cpu_millicores,
484                            total_memory_bytes: node_info.total_memory_bytes,
485                            cpu_usage_millicores: node_info.cpu_usage_millicores,
486                            memory_usage_bytes: node_info.memory_usage_bytes,
487                            hostname: node_info.hostname,
488                            env_vars: Default::default(),
489                        }
490                    } else {
491                        // TODO(zyy17): It's for backward compatibility. Remove this when the deprecated fields are removed from the proto.
492                        NodeInfo {
493                            peer: node.peer.unwrap_or_default(),
494                            last_activity_ts,
495                            status: NodeStatus::Metasrv(MetasrvStatus { is_leader: false }),
496                            version: node.version,
497                            git_commit: node.git_commit,
498                            start_time_ms: node.start_time_ms,
499                            total_cpu_millicores: node.cpus as i64,
500                            total_memory_bytes: node.memory_bytes as i64,
501                            cpu_usage_millicores: 0,
502                            memory_usage_bytes: 0,
503                            hostname: "".to_string(),
504                            env_vars: Default::default(),
505                        }
506                    }
507                })
508                .chain(leader.into_iter().map(|node| {
509                    if let Some(node_info) = node.info {
510                        NodeInfo {
511                            peer: node.peer.unwrap_or_default(),
512                            last_activity_ts,
513                            status: NodeStatus::Metasrv(MetasrvStatus { is_leader: true }),
514                            version: node_info.version,
515                            git_commit: node_info.git_commit,
516                            start_time_ms: node_info.start_time_ms,
517                            total_cpu_millicores: node_info.total_cpu_millicores,
518                            total_memory_bytes: node_info.total_memory_bytes,
519                            cpu_usage_millicores: node_info.cpu_usage_millicores,
520                            memory_usage_bytes: node_info.memory_usage_bytes,
521                            hostname: node_info.hostname,
522                            env_vars: Default::default(),
523                        }
524                    } else {
525                        // TODO(zyy17): It's for backward compatibility. Remove this when the deprecated fields are removed from the proto.
526                        NodeInfo {
527                            peer: node.peer.unwrap_or_default(),
528                            last_activity_ts,
529                            status: NodeStatus::Metasrv(MetasrvStatus { is_leader: true }),
530                            version: node.version,
531                            git_commit: node.git_commit,
532                            start_time_ms: node.start_time_ms,
533                            total_cpu_millicores: node.cpus as i64,
534                            total_memory_bytes: node.memory_bytes as i64,
535                            cpu_usage_millicores: 0,
536                            memory_usage_bytes: 0,
537                            hostname: "".to_string(),
538                            env_vars: Default::default(),
539                        }
540                    }
541                }))
542                .collect::<Vec<_>>()
543        } else {
544            Vec::new()
545        };
546
547        if let Some(prefix) = nodes_key_prefix {
548            let req = RangeRequest::new().with_prefix(prefix);
549            let res = cluster_client.range(req).await?;
550            for kv in res.kvs {
551                nodes.push(NodeInfo::try_from(kv.value).context(ConvertMetaResponseSnafu)?);
552            }
553        }
554
555        Ok(nodes)
556    }
557
558    async fn list_region_stats(&self) -> Result<Vec<RegionStat>> {
559        let cluster_kv_backend = Arc::new(self.cluster_client()?);
560        let range_prefix = DatanodeStatKey::prefix_key();
561        let req = RangeRequest::new().with_prefix(range_prefix);
562        let stream =
563            PaginationStream::new(cluster_kv_backend, req, 256, decode_stats).into_stream();
564        let mut datanode_stats = stream
565            .try_collect::<Vec<_>>()
566            .await
567            .context(ConvertMetaResponseSnafu)?;
568        let region_stats = datanode_stats
569            .iter_mut()
570            .flat_map(|datanode_stat| {
571                let last = datanode_stat.stats.pop();
572                last.map(|stat| stat.region_stats).unwrap_or_default()
573            })
574            .collect::<Vec<_>>();
575
576        Ok(region_stats)
577    }
578
579    async fn list_flow_stats(&self) -> Result<Option<FlowStat>> {
580        let cluster_backend = ClusterKvBackend::new(Arc::new(self.cluster_client()?));
581        let cluster_backend = Arc::new(cluster_backend) as KvBackendRef;
582        let flow_state_manager = FlowStateManager::new(cluster_backend);
583        let res = flow_state_manager.get().await.context(GetFlowStatSnafu)?;
584
585        Ok(res.map(|r| r.into()))
586    }
587}
588
589// TODO(weny): the discovery using client side timestamp may be inaccurate,
590// maybe we need to use the timestamp from metasrv in the future.
591#[async_trait::async_trait]
592impl PeerDiscovery for MetaClient {
593    async fn active_frontends(&self) -> MetaResult<Vec<Peer>> {
594        let nodes = self
595            .list_nodes(Some(ClusterRole::Frontend))
596            .await
597            .map_err(BoxedError::new)
598            .context(ExternalSnafu)?;
599        Ok(util::alive_frontends(
600            &DefaultSystemTimer,
601            nodes,
602            // TODO(weny): the heartbeat interval should be received from metasrv
603            // instead of using the default value.
604            default_distributed_time_constants().frontend_heartbeat_interval,
605        ))
606    }
607
608    async fn active_datanodes(
609        &self,
610        filter: Option<for<'a> fn(&'a NodeWorkloads) -> bool>,
611    ) -> MetaResult<Vec<Peer>> {
612        let nodes = self
613            .list_nodes(Some(ClusterRole::Datanode))
614            .await
615            .map_err(BoxedError::new)
616            .context(ExternalSnafu)?;
617        Ok(util::alive_datanodes(
618            &DefaultSystemTimer,
619            nodes,
620            default_distributed_time_constants().datanode_lease,
621            filter,
622        ))
623    }
624
625    async fn active_flownodes(
626        &self,
627        filter: Option<for<'a> fn(&'a NodeWorkloads) -> bool>,
628    ) -> MetaResult<Vec<Peer>> {
629        let nodes = self
630            .list_nodes(Some(ClusterRole::Flownode))
631            .await
632            .map_err(BoxedError::new)
633            .context(ExternalSnafu)?;
634        Ok(util::alive_flownodes(
635            &DefaultSystemTimer,
636            nodes,
637            default_distributed_time_constants().flownode_lease,
638            filter,
639        ))
640    }
641}
642
643fn decode_stats(kv: KeyValue) -> MetaResult<DatanodeStatValue> {
644    DatanodeStatValue::try_from(kv.value)
645        .map_err(BoxedError::new)
646        .context(ExternalSnafu)
647}
648
649impl MetaClient {
650    pub async fn start<U, A>(&mut self, urls: A) -> Result<()>
651    where
652        U: AsRef<str>,
653        A: AsRef<[U]> + Clone,
654    {
655        info!("MetaClient channel config: {:?}", self.channel_config());
656
657        let urls = urls.as_ref().iter().map(|u| u.as_ref()).collect::<Vec<_>>();
658        let leader_provider = self.leader_provider_factory.create(&urls);
659
660        self.start_with(leader_provider, urls).await
661    }
662
663    /// Start the client with a [LeaderProvider] and other Metasrv peers' addresses.
664    pub(crate) async fn start_with<U, A>(
665        &mut self,
666        leader_provider: LeaderProviderRef,
667        peers: A,
668    ) -> Result<()>
669    where
670        U: AsRef<str>,
671        A: AsRef<[U]> + Clone,
672    {
673        if let Some(client) = &self.region_follower {
674            info!("Starting region follower client ...");
675            client.start_with(leader_provider.clone()).await?;
676        }
677
678        if let Some(client) = &self.heartbeat {
679            info!("Starting heartbeat client ...");
680            client.start_with(leader_provider.clone()).await?;
681        }
682
683        if let Some(client) = &self.config {
684            info!("Starting config client ...");
685            client.start_with(leader_provider.clone()).await?;
686        }
687
688        if let Some(client) = &mut self.store {
689            info!("Starting store client ...");
690            client.start(peers.clone()).await?;
691        }
692
693        if let Some(client) = &self.procedure {
694            info!("Starting procedure client ...");
695            client.start_with(leader_provider.clone()).await?;
696        }
697
698        if let Some(client) = &mut self.cluster {
699            info!("Starting cluster client ...");
700            client.start_with(leader_provider).await?;
701        }
702        Ok(())
703    }
704
705    /// Ask the leader address of `metasrv`, and the heartbeat component
706    /// needs to create a bidirectional streaming to the leader.
707    pub async fn ask_leader(&self) -> Result<String> {
708        self.heartbeat_client()?.ask_leader().await
709    }
710
711    pub async fn pull_config<T, U>(&self, deserializer: T) -> Result<U>
712    where
713        T: PluginOptionsDeserializer<U>,
714        U: DeserializeOwned,
715    {
716        let res = self.config_client()?.pull_config().await?;
717        let v = deserializer
718            .deserialize(&res.payload)
719            .context(ConvertMetaConfigSnafu)?;
720        Ok(v)
721    }
722
723    /// Returns a heartbeat bidirectional streaming: (sender, receiver), the
724    /// other end is the leader of `metasrv`.
725    ///
726    /// The `datanode` needs to use the sender to continuously send heartbeat
727    /// packets (some self-state data), and the receiver can receive a response
728    /// from "metasrv" (which may contain some scheduling instructions).
729    ///
730    /// Returns the heartbeat sender, stream, and configuration received from Metasrv.
731    pub async fn heartbeat(&self) -> Result<(HeartbeatSender, HeartbeatStream, HeartbeatConfig)> {
732        self.heartbeat_client()?.heartbeat().await
733    }
734
735    /// Range gets the keys in the range from the key-value store.
736    pub async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
737        self.store_client()?
738            .range(req.into())
739            .await?
740            .try_into()
741            .context(ConvertMetaResponseSnafu)
742    }
743
744    /// Put puts the given key into the key-value store.
745    pub async fn put(&self, req: PutRequest) -> Result<PutResponse> {
746        self.store_client()?
747            .put(req.into())
748            .await?
749            .try_into()
750            .context(ConvertMetaResponseSnafu)
751    }
752
753    /// BatchGet atomically get values by the given keys from the key-value store.
754    pub async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse> {
755        self.store_client()?
756            .batch_get(req.into())
757            .await?
758            .try_into()
759            .context(ConvertMetaResponseSnafu)
760    }
761
762    /// BatchPut atomically puts the given keys into the key-value store.
763    pub async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
764        self.store_client()?
765            .batch_put(req.into())
766            .await?
767            .try_into()
768            .context(ConvertMetaResponseSnafu)
769    }
770
771    /// BatchDelete atomically deletes the given keys from the key-value store.
772    pub async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
773        self.store_client()?
774            .batch_delete(req.into())
775            .await?
776            .try_into()
777            .context(ConvertMetaResponseSnafu)
778    }
779
780    /// CompareAndPut atomically puts the value to the given updated
781    /// value if the current value == the expected value.
782    pub async fn compare_and_put(
783        &self,
784        req: CompareAndPutRequest,
785    ) -> Result<CompareAndPutResponse> {
786        self.store_client()?
787            .compare_and_put(req.into())
788            .await?
789            .try_into()
790            .context(ConvertMetaResponseSnafu)
791    }
792
793    /// DeleteRange deletes the given range from the key-value store.
794    pub async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
795        self.store_client()?
796            .delete_range(req.into())
797            .await?
798            .try_into()
799            .context(ConvertMetaResponseSnafu)
800    }
801
802    /// Query the procedure state by its id.
803    pub async fn query_procedure_state(&self, pid: &str) -> Result<ProcedureStateResponse> {
804        self.procedure_client()?.query_procedure_state(pid).await
805    }
806
807    /// Submit a region migration task.
808    pub async fn migrate_region(
809        &self,
810        request: MigrateRegionRequest,
811    ) -> Result<MigrateRegionResponse> {
812        self.procedure_client()?
813            .migrate_region(
814                request.region_id,
815                request.from_peer,
816                request.to_peer,
817                request.timeout,
818            )
819            .await
820    }
821
822    /// Reconcile the procedure state.
823    pub async fn reconcile(&self, request: ReconcileRequest) -> Result<ReconcileResponse> {
824        self.procedure_client()?.reconcile(request).await
825    }
826
827    /// Manually trigger GC for specific regions.
828    pub async fn gc_regions(&self, request: GcRegionsRequest) -> Result<GcResponse> {
829        self.procedure_client()?.gc_regions(request).await
830    }
831
832    /// Manually trigger GC for a table (all its regions).
833    pub async fn gc_table(&self, request: GcTableRequest) -> Result<GcResponse> {
834        self.procedure_client()?.gc_table(request).await
835    }
836
837    /// Submit a DDL task
838    pub async fn submit_ddl_task(
839        &self,
840        req: SubmitDdlTaskRequest,
841    ) -> Result<SubmitDdlTaskResponse> {
842        let res = self
843            .procedure_client()?
844            .submit_ddl_task(req.try_into().context(ConvertMetaRequestSnafu)?)
845            .await?
846            .try_into()
847            .context(ConvertMetaResponseSnafu)?;
848
849        Ok(res)
850    }
851
852    pub fn heartbeat_client(&self) -> Result<HeartbeatClient> {
853        self.heartbeat.clone().context(NotStartedSnafu {
854            name: "heartbeat_client",
855        })
856    }
857
858    pub fn config_client(&self) -> Result<ConfigClient> {
859        self.config.clone().context(NotStartedSnafu {
860            name: "config_client",
861        })
862    }
863
864    pub fn store_client(&self) -> Result<StoreClient> {
865        self.store.clone().context(NotStartedSnafu {
866            name: "store_client",
867        })
868    }
869
870    pub fn procedure_client(&self) -> Result<ProcedureClient> {
871        self.procedure.clone().context(NotStartedSnafu {
872            name: "procedure_client",
873        })
874    }
875
876    pub fn cluster_client(&self) -> Result<ClusterClient> {
877        self.cluster.clone().context(NotStartedSnafu {
878            name: "cluster_client",
879        })
880    }
881
    /// Returns the configuration of the underlying gRPC channel manager.
    pub fn channel_config(&self) -> &ChannelConfig {
        self.channel_manager.config()
    }
885
    /// Returns the identifier of this meta client.
    pub fn id(&self) -> Id {
        self.id
    }
889}
890
#[cfg(test)]
mod tests {
    use std::sync::atomic::{AtomicUsize, Ordering};

    use api::v1::meta::{HeartbeatRequest, Peer};
    use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
    use rand::Rng;

    use super::*;
    use crate::error;
    use crate::mocks::{self, MockMetaContext};

    /// Common prefix for every key written by these tests, so test data can
    /// be wiped without touching anything else in the store.
    const TEST_KEY_PREFIX: &str = "__unit_test__meta__";

    /// A [`MetaClient`] backed by a mocked metasrv, with all keys namespaced
    /// by `ns` so that tests do not interfere with each other.
    struct TestClient {
        ns: String,
        client: MetaClient,
        meta_ctx: MockMetaContext,
    }

    impl TestClient {
        async fn new(ns: impl Into<String>) -> Self {
            // can also test with etcd: mocks::mock_client_with_etcdstore("127.0.0.1:2379").await;
            let (client, meta_ctx) = mocks::mock_client_with_memstore().await;
            Self {
                ns: ns.into(),
                client,
                meta_ctx,
            }
        }

        /// Builds a namespaced key: `<TEST_KEY_PREFIX>-<ns>-<name>`.
        fn key(&self, name: &str) -> Vec<u8> {
            format!("{}-{}-{}", TEST_KEY_PREFIX, self.ns, name).into_bytes()
        }

        /// Seeds the store with `key-0`..`key-9` mapped to `value-0`..`value-9`.
        async fn gen_data(&self) {
            for i in 0..10 {
                let req = PutRequest::new()
                    .with_key(self.key(&format!("key-{i}")))
                    .with_value(format!("{}-{}", "value", i).into_bytes())
                    .with_prev_kv();
                let res = self.client.put(req).await;
                let _ = res.unwrap();
            }
        }

        /// Deletes every key under this client's namespace.
        async fn clear_data(&self) {
            let req =
                DeleteRangeRequest::new().with_prefix(format!("{}-{}", TEST_KEY_PREFIX, self.ns));
            let res = self.client.delete_range(req).await;
            let _ = res.unwrap();
        }

        #[allow(dead_code)]
        fn kv_backend(&self) -> KvBackendRef {
            self.meta_ctx.kv_backend.clone()
        }

        fn in_memory(&self) -> Option<ResettableKvBackendRef> {
            self.meta_ctx.in_memory.clone()
        }
    }

    /// Creates a [`TestClient`] whose namespace starts out empty.
    async fn new_client(ns: impl Into<String>) -> TestClient {
        let client = TestClient::new(ns).await;
        client.clear_data().await;
        client
    }

    #[tokio::test]
    async fn test_meta_client_builder() {
        let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];

        let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
            .enable_heartbeat()
            .build();
        let _ = meta_client.heartbeat_client().unwrap();
        assert!(meta_client.store_client().is_err());
        meta_client.start(urls).await.unwrap();

        let mut meta_client = MetaClientBuilder::new(0, Role::Datanode).build();
        assert!(meta_client.heartbeat_client().is_err());
        assert!(meta_client.store_client().is_err());
        meta_client.start(urls).await.unwrap();

        let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
            .enable_store()
            .build();
        assert!(meta_client.heartbeat_client().is_err());
        let _ = meta_client.store_client().unwrap();
        meta_client.start(urls).await.unwrap();

        let mut meta_client = MetaClientBuilder::new(2, Role::Datanode)
            .enable_heartbeat()
            .enable_store()
            .build();
        assert_eq!(2, meta_client.id());
        let _ = meta_client.heartbeat_client().unwrap();
        let _ = meta_client.store_client().unwrap();
        meta_client.start(urls).await.unwrap();
    }

    #[tokio::test]
    async fn test_not_start_heartbeat_client() {
        let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];
        let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
            .enable_store()
            .build();
        meta_client.start(urls).await.unwrap();
        let res = meta_client.ask_leader().await;
        assert!(matches!(res.err(), Some(error::Error::NotStarted { .. })));
    }

    #[tokio::test]
    async fn test_not_start_store_client() {
        let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];
        let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
            .enable_heartbeat()
            .build();

        meta_client.start(urls).await.unwrap();
        let res = meta_client.put(PutRequest::default()).await;
        assert!(matches!(res.err(), Some(error::Error::NotStarted { .. })));
    }

    #[tokio::test]
    async fn test_store_writes_are_read_only_by_default() {
        let meta_client = MetaClientBuilder::new(0, Role::Datanode)
            .enable_store()
            .build();

        let res = meta_client.put(PutRequest::default()).await;
        assert!(matches!(
            res.err(),
            Some(error::Error::ReadOnlyKvBackend { .. })
        ));
    }

    #[tokio::test]
    async fn test_ask_leader() {
        let tc = new_client("test_ask_leader").await;
        tc.client.ask_leader().await.unwrap();
    }

    #[tokio::test]
    async fn test_heartbeat() {
        let tc = new_client("test_heartbeat").await;
        let (sender, mut receiver, _config) = tc.client.heartbeat().await.unwrap();
        // send heartbeats

        let request_sent = Arc::new(AtomicUsize::new(0));
        let request_sent_clone = request_sent.clone();
        let _handle = tokio::spawn(async move {
            for _ in 0..5 {
                let req = HeartbeatRequest {
                    peer: Some(Peer {
                        id: 1,
                        addr: "meta_client_peer".to_string(),
                    }),
                    ..Default::default()
                };
                sender.send(req).await.unwrap();
                request_sent_clone.fetch_add(1, Ordering::Relaxed);
            }
        });

        // Count responses until the stream is closed.
        let heartbeat_count = Arc::new(AtomicUsize::new(0));
        let heartbeat_count_clone = heartbeat_count.clone();
        let handle = tokio::spawn(async move {
            while let Some(_resp) = receiver.message().await.unwrap() {
                heartbeat_count_clone.fetch_add(1, Ordering::Relaxed);
            }
        });

        handle.await.unwrap();
        //+1 for the initial response
        assert_eq!(
            request_sent.load(Ordering::Relaxed) + 1,
            heartbeat_count.load(Ordering::Relaxed)
        );
    }

    #[tokio::test]
    async fn test_range_get() {
        let tc = new_client("test_range_get").await;
        tc.gen_data().await;

        let key = tc.key("key-0");
        let req = RangeRequest::new().with_key(key.as_slice());
        let res = tc.client.range(req).await;
        let mut kvs = res.unwrap().take_kvs();
        assert_eq!(1, kvs.len());
        let mut kv = kvs.pop().unwrap();
        assert_eq!(key, kv.take_key());
        assert_eq!(b"value-0".to_vec(), kv.take_value());
    }

    #[tokio::test]
    async fn test_range_get_prefix() {
        let tc = new_client("test_range_get_prefix").await;
        tc.gen_data().await;

        let req = RangeRequest::new().with_prefix(tc.key("key-"));
        let res = tc.client.range(req).await;
        let kvs = res.unwrap().take_kvs();
        assert_eq!(10, kvs.len());
        for (i, mut kv) in kvs.into_iter().enumerate() {
            assert_eq!(tc.key(&format!("key-{i}")), kv.take_key());
            assert_eq!(format!("{}-{}", "value", i).into_bytes(), kv.take_value());
        }
    }

    #[tokio::test]
    async fn test_range() {
        let tc = new_client("test_range").await;
        tc.gen_data().await;

        // The range end is exclusive: key-5, key-6, key-7 are expected.
        let req = RangeRequest::new().with_range(tc.key("key-5"), tc.key("key-8"));
        let res = tc.client.range(req).await;
        let kvs = res.unwrap().take_kvs();
        assert_eq!(3, kvs.len());
        for (i, mut kv) in kvs.into_iter().enumerate() {
            assert_eq!(tc.key(&format!("key-{}", i + 5)), kv.take_key());
            assert_eq!(
                format!("{}-{}", "value", i + 5).into_bytes(),
                kv.take_value()
            );
        }
    }

    #[tokio::test]
    async fn test_range_keys_only() {
        let tc = new_client("test_range_keys_only").await;
        tc.gen_data().await;

        let req = RangeRequest::new()
            .with_range(tc.key("key-5"), tc.key("key-8"))
            .with_keys_only();
        let res = tc.client.range(req).await;
        let kvs = res.unwrap().take_kvs();
        assert_eq!(3, kvs.len());
        for (i, mut kv) in kvs.into_iter().enumerate() {
            assert_eq!(tc.key(&format!("key-{}", i + 5)), kv.take_key());
            assert!(kv.take_value().is_empty());
        }
    }

    #[tokio::test]
    async fn test_put() {
        let tc = new_client("test_put").await;

        let req = PutRequest::new()
            .with_key(tc.key("key"))
            .with_value(b"value".to_vec());
        let res = tc.client.put(req).await;
        assert!(res.unwrap().prev_kv.is_none());
    }

    #[tokio::test]
    async fn test_put_with_prev_kv() {
        let tc = new_client("test_put_with_prev_kv").await;

        let key = tc.key("key");
        let req = PutRequest::new()
            .with_key(key.as_slice())
            .with_value(b"value".to_vec())
            .with_prev_kv();
        let res = tc.client.put(req).await;
        assert!(res.unwrap().prev_kv.is_none());

        let req = PutRequest::new()
            .with_key(key.as_slice())
            .with_value(b"value1".to_vec())
            .with_prev_kv();
        let res = tc.client.put(req).await;
        let mut kv = res.unwrap().prev_kv.unwrap();
        assert_eq!(key, kv.take_key());
        assert_eq!(b"value".to_vec(), kv.take_value());
    }

    #[tokio::test]
    async fn test_batch_put() {
        let tc = new_client("test_batch_put").await;

        let mut req = BatchPutRequest::new();
        for i in 0..275 {
            req = req.add_kv(
                tc.key(&format!("key-{}", i)),
                format!("value-{}", i).into_bytes(),
            );
        }

        let res = tc.client.batch_put(req).await;
        assert_eq!(0, res.unwrap().take_prev_kvs().len());

        let req = RangeRequest::new().with_prefix(tc.key("key-"));
        let res = tc.client.range(req).await;
        let kvs = res.unwrap().take_kvs();
        assert_eq!(275, kvs.len());
    }

    #[tokio::test]
    async fn test_batch_get() {
        let tc = new_client("test_batch_get").await;
        tc.gen_data().await;

        // Only key-0..key-9 exist; requesting 256 keys returns just those 10.
        let mut req = BatchGetRequest::default();
        for i in 0..256 {
            req = req.add_key(tc.key(&format!("key-{}", i)));
        }
        let res = tc.client.batch_get(req).await.unwrap();
        assert_eq!(10, res.kvs.len());

        let req = BatchGetRequest::default()
            .add_key(tc.key("key-1"))
            .add_key(tc.key("key-999"));
        let res = tc.client.batch_get(req).await.unwrap();
        assert_eq!(1, res.kvs.len());
    }

    #[tokio::test]
    async fn test_batch_put_with_prev_kv() {
        let tc = new_client("test_batch_put_with_prev_kv").await;

        let key = tc.key("key");
        let key2 = tc.key("key2");
        let req = BatchPutRequest::new().add_kv(key.as_slice(), b"value".to_vec());
        let res = tc.client.batch_put(req).await;
        assert_eq!(0, res.unwrap().take_prev_kvs().len());

        // Only `key` existed before, so exactly one prev_kv comes back.
        let req = BatchPutRequest::new()
            .add_kv(key.as_slice(), b"value-".to_vec())
            .add_kv(key2.as_slice(), b"value2-".to_vec())
            .with_prev_kv();
        let res = tc.client.batch_put(req).await;
        let mut kvs = res.unwrap().take_prev_kvs();
        assert_eq!(1, kvs.len());
        let mut kv = kvs.pop().unwrap();
        assert_eq!(key, kv.take_key());
        assert_eq!(b"value".to_vec(), kv.take_value());
    }

    #[tokio::test]
    async fn test_compare_and_put() {
        let tc = new_client("test_compare_and_put").await;

        let key = tc.key("key");
        let req = CompareAndPutRequest::new()
            .with_key(key.as_slice())
            .with_expect(b"expect".to_vec())
            .with_value(b"value".to_vec());
        let res = tc.client.compare_and_put(req).await;
        assert!(!res.unwrap().is_success());

        // create if absent
        let req = CompareAndPutRequest::new()
            .with_key(key.as_slice())
            .with_value(b"value".to_vec());
        let res = tc.client.compare_and_put(req).await;
        let mut res = res.unwrap();
        assert!(res.is_success());
        assert!(res.take_prev_kv().is_none());

        // compare and put fail
        let req = CompareAndPutRequest::new()
            .with_key(key.as_slice())
            .with_expect(b"not_eq".to_vec())
            .with_value(b"value2".to_vec());
        let res = tc.client.compare_and_put(req).await;
        let mut res = res.unwrap();
        assert!(!res.is_success());
        assert_eq!(b"value".to_vec(), res.take_prev_kv().unwrap().take_value());

        // compare and put success
        let req = CompareAndPutRequest::new()
            .with_key(key.as_slice())
            .with_expect(b"value".to_vec())
            .with_value(b"value2".to_vec());
        let res = tc.client.compare_and_put(req).await;
        let mut res = res.unwrap();
        assert!(res.is_success());

        // If compare-and-put is success, previous value doesn't need to be returned.
        assert!(res.take_prev_kv().is_none());
    }

    #[tokio::test]
    async fn test_delete_with_key() {
        let tc = new_client("test_delete_with_key").await;
        tc.gen_data().await;

        let req = DeleteRangeRequest::new()
            .with_key(tc.key("key-0"))
            .with_prev_kv();
        let res = tc.client.delete_range(req).await;
        let mut res = res.unwrap();
        assert_eq!(1, res.deleted());
        let mut kvs = res.take_prev_kvs();
        assert_eq!(1, kvs.len());
        let mut kv = kvs.pop().unwrap();
        assert_eq!(b"value-0".to_vec(), kv.take_value());
    }

    #[tokio::test]
    async fn test_delete_with_prefix() {
        let tc = new_client("test_delete_with_prefix").await;
        tc.gen_data().await;

        let req = DeleteRangeRequest::new()
            .with_prefix(tc.key("key-"))
            .with_prev_kv();
        let res = tc.client.delete_range(req).await;
        let mut res = res.unwrap();
        assert_eq!(10, res.deleted());
        let kvs = res.take_prev_kvs();
        assert_eq!(10, kvs.len());
        for (i, mut kv) in kvs.into_iter().enumerate() {
            assert_eq!(format!("{}-{}", "value", i).into_bytes(), kv.take_value());
        }
    }

    #[tokio::test]
    async fn test_delete_with_range() {
        let tc = new_client("test_delete_with_range").await;
        tc.gen_data().await;

        // The range end is exclusive: key-2..key-6 are deleted.
        let req = DeleteRangeRequest::new()
            .with_range(tc.key("key-2"), tc.key("key-7"))
            .with_prev_kv();
        let res = tc.client.delete_range(req).await;
        let mut res = res.unwrap();
        assert_eq!(5, res.deleted());
        let kvs = res.take_prev_kvs();
        assert_eq!(5, kvs.len());
        for (i, mut kv) in kvs.into_iter().enumerate() {
            assert_eq!(
                format!("{}-{}", "value", i + 2).into_bytes(),
                kv.take_value()
            );
        }
    }

    /// Decoder for [`PaginationStream`] that discards the key-value payload;
    /// the test below only cares about the number of items streamed.
    fn mock_decoder(_kv: KeyValue) -> MetaResult<()> {
        Ok(())
    }

    #[tokio::test]
    async fn test_cluster_client_adaptive_range() {
        let tc = new_client("test_cluster_client").await;
        let in_memory = tc.in_memory().unwrap();
        let cluster_client = tc.client.cluster_client().unwrap();
        let mut rng = rand::rng();

        // Generates rough 10MB data, which is larger than the default grpc message size limit.
        for i in 0..10 {
            let data: Vec<u8> = (0..1024 * 1024).map(|_| rng.random()).collect();
            in_memory
                .put(
                    PutRequest::new()
                        .with_key(format!("__prefix/{i}").as_bytes())
                        .with_value(data.clone()),
                )
                .await
                .unwrap();
        }

        let req = RangeRequest::new().with_prefix(b"__prefix/");
        let stream =
            PaginationStream::new(Arc::new(cluster_client), req, 10, mock_decoder).into_stream();

        let res = stream.try_collect::<Vec<_>>().await.unwrap();
        assert_eq!(10, res.len());
    }
}