use std::collections::HashSet;
use std::sync::Arc;
use std::time::Duration;

use async_trait::async_trait;
use clap::{Parser, ValueEnum};
use common_error::ext::BoxedError;
use common_telemetry::{debug, error, info};
use object_store::ObjectStore;
use serde_json::Value;
use snafu::{OptionExt, ResultExt};
use tokio::sync::Semaphore;
use tokio::time::Instant;

use crate::common::{ObjectStoreConfig, new_fs_object_store};
use crate::data::storage_export::{
    AzblobBackend, FsBackend, GcsBackend, OssBackend, S3Backend, StorageExport, StorageType,
};
use crate::data::{COPY_PATH_PLACEHOLDER, default_database};
use crate::database::{DatabaseClient, parse_proxy_opts};
use crate::error::{
    EmptyResultSnafu, Error, OpenDalSnafu, OutputDirNotSetSnafu, Result, SchemaNotFoundSnafu,
};
use crate::{Tool, database};

type TableReference = (String, String, String);

#[derive(Debug, Default, Clone, ValueEnum)]
enum ExportTarget {
    /// Export only the schemas (database and table DDL).
    Schema,
    /// Export only the table data.
    Data,
    /// Export both schemas and data.
    #[default]
    All,
}

#[derive(Debug, Default, Parser)]
pub struct ExportCommand {
    /// Server address to connect to.
    #[clap(long)]
    addr: String,

    /// Directory to put the exported files. Required unless a remote storage backend
    /// (S3, OSS, GCS, or Azblob) is enabled.
    #[clap(long)]
    output_dir: Option<String>,

    /// The database to export.
    #[clap(long, default_value_t = default_database())]
    database: String,

    /// Number of databases exported in parallel.
    #[clap(long, short = 'j', default_value = "1", alias = "export-jobs")]
    db_parallelism: usize,

    /// Parallelism passed to `COPY DATABASE` when exporting the data of one database.
    #[clap(long, default_value = "4")]
    table_parallelism: usize,

    /// Maximum number of retry attempts for a failed export job.
    #[clap(long, default_value = "3")]
    max_retry: usize,

    /// What to export: schema, data, or all.
    #[clap(long, short = 't', value_enum, default_value = "all")]
    target: ExportTarget,

    /// Optional start of the time range to export (passed as `start_time` to `COPY DATABASE`).
    #[clap(long)]
    start_time: Option<String>,

    /// Optional end of the time range to export (passed as `end_time` to `COPY DATABASE`).
    #[clap(long)]
    end_time: Option<String>,

    /// HTTP basic authentication credentials.
    #[clap(long)]
    auth_basic: Option<String>,

    /// Request timeout, parsed by humantime (e.g. `30s`, `5m`).
    #[clap(long, value_parser = humantime::parse_duration)]
    timeout: Option<Duration>,

    /// Proxy URL used when connecting to the server.
    #[clap(long)]
    proxy: Option<String>,

    /// Disable the use of any proxy.
    #[clap(long)]
    no_proxy: bool,

    /// When exporting to remote storage, write the generated DDL/SQL files to this
    /// local directory instead of the remote backend.
    #[clap(long)]
    ddl_local_dir: Option<String>,

    #[clap(flatten)]
    storage: ObjectStoreConfig,
}

impl ExportCommand {
    pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
        let (storage_type, operator) = if self.storage.enable_s3 {
            (
                StorageType::S3(S3Backend::new(self.storage.s3.clone())?),
                self.storage.build_s3()?,
            )
        } else if self.storage.enable_oss {
            (
                StorageType::Oss(OssBackend::new(self.storage.oss.clone())?),
                self.storage.build_oss()?,
            )
        } else if self.storage.enable_gcs {
            (
                StorageType::Gcs(GcsBackend::new(self.storage.gcs.clone())?),
                self.storage.build_gcs()?,
            )
        } else if self.storage.enable_azblob {
            (
                StorageType::Azblob(AzblobBackend::new(self.storage.azblob.clone())?),
                self.storage.build_azblob()?,
            )
        } else if let Some(output_dir) = &self.output_dir {
            (
                StorageType::Fs(FsBackend::new(output_dir.clone())),
                new_fs_object_store(output_dir)?,
            )
        } else {
            return Err(BoxedError::new(OutputDirNotSetSnafu {}.build()));
        };

        let (catalog, schema) =
            database::split_database(&self.database).map_err(BoxedError::new)?;
        let proxy = parse_proxy_opts(self.proxy.clone(), self.no_proxy)?;
        let database_client = DatabaseClient::new(
            self.addr.clone(),
            catalog.clone(),
            self.auth_basic.clone(),
            self.timeout.unwrap_or_default(),
            proxy,
        );

        Ok(Box::new(Export {
            catalog,
            schema,
            database_client,
            export_jobs: self.db_parallelism,
            target: self.target.clone(),
            start_time: self.start_time.clone(),
            end_time: self.end_time.clone(),
            parallelism: self.table_parallelism,
            storage_type,
            ddl_local_dir: self.ddl_local_dir.clone(),
            operator,
        }))
    }
}

#[derive(Clone)]
pub struct Export {
    catalog: String,
    schema: Option<String>,
    database_client: DatabaseClient,
    export_jobs: usize,
    target: ExportTarget,
    start_time: Option<String>,
    end_time: Option<String>,
    parallelism: usize,
    storage_type: StorageType,
    ddl_local_dir: Option<String>,
    operator: ObjectStore,
}

impl Export {
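    /// Returns the database names to export, restricted to the schema given on the
    /// command line when one was specified.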
    async fn get_db_names(&self) -> Result<Vec<String>> {
        let db_names = self.all_db_names().await?;
        let Some(schema) = &self.schema else {
            return Ok(db_names);
        };

        db_names
            .into_iter()
            .find(|db_name| db_name.to_lowercase() == schema.to_lowercase())
            .map(|name| vec![name])
            .context(SchemaNotFoundSnafu {
                catalog: &self.catalog,
                schema,
            })
    }

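    /// Lists every database on the target instance, skipping the `information_schema`
    /// and `pg_catalog` system schemas.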
    async fn all_db_names(&self) -> Result<Vec<String>> {
        let records = self
            .database_client
            .sql_in_public("SHOW DATABASES")
            .await?
            .context(EmptyResultSnafu)?;
        let mut result = Vec::with_capacity(records.len());
        for value in records {
            let Value::String(schema) = &value[0] else {
                unreachable!()
            };
            if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
                continue;
            }
            if schema == common_catalog::consts::PG_CATALOG_NAME {
                continue;
            }
            result.push(schema.clone());
        }
        Ok(result)
    }

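    /// Splits the tables under `catalog.schema` into three groups: metric engine
    /// physical tables (identified by the internal `__tsid` column), remaining
    /// ordinary tables, and views.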
    async fn get_table_list(
        &self,
        catalog: &str,
        schema: &str,
    ) -> Result<(
        Vec<TableReference>,
        Vec<TableReference>,
        Vec<TableReference>,
    )> {
        let sql = format!(
            "SELECT table_catalog, table_schema, table_name \
            FROM information_schema.columns \
            WHERE column_name = '__tsid' \
            and table_catalog = \'{catalog}\' \
            and table_schema = \'{schema}\'"
        );
        let records = self
            .database_client
            .sql_in_public(&sql)
            .await?
            .context(EmptyResultSnafu)?;
        let mut metric_physical_tables = HashSet::with_capacity(records.len());
        for value in records {
            let mut t = Vec::with_capacity(3);
            for v in &value {
                let Value::String(value) = v else {
                    unreachable!()
                };
                t.push(value);
            }
            metric_physical_tables.insert((t[0].clone(), t[1].clone(), t[2].clone()));
        }

        let sql = format!(
            "SELECT table_catalog, table_schema, table_name, table_type \
            FROM information_schema.tables \
            WHERE (table_type = \'BASE TABLE\' OR table_type = \'VIEW\') \
            and table_catalog = \'{catalog}\' \
            and table_schema = \'{schema}\'",
        );
        let records = self
            .database_client
            .sql_in_public(&sql)
            .await?
            .context(EmptyResultSnafu)?;

        debug!("Fetched table/view list: {:?}", records);

        if records.is_empty() {
            return Ok((vec![], vec![], vec![]));
        }

        let mut remaining_tables = Vec::with_capacity(records.len());
        let mut views = Vec::new();
        for value in records {
            let mut t = Vec::with_capacity(4);
            for v in &value {
                let Value::String(value) = v else {
                    unreachable!()
                };
                t.push(value);
            }
            let table = (t[0].clone(), t[1].clone(), t[2].clone());
            let table_type = t[3].as_str();
            if !metric_physical_tables.contains(&table) {
                if table_type == "VIEW" {
                    views.push(table);
                } else {
                    remaining_tables.push(table);
                }
            }
        }

        Ok((
            metric_physical_tables.into_iter().collect(),
            remaining_tables,
            views,
        ))
    }

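    /// Runs `SHOW CREATE <show_type>` against the given database, table, or view and
    /// returns the statement with a trailing semicolon and newline appended.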
    async fn show_create(
        &self,
        show_type: &str,
        catalog: &str,
        schema: &str,
        table: Option<&str>,
    ) -> Result<String> {
        let sql = match table {
            Some(table) => format!(
                r#"SHOW CREATE {} "{}"."{}"."{}""#,
                show_type, catalog, schema, table
            ),
            None => format!(r#"SHOW CREATE {} "{}"."{}""#, show_type, catalog, schema),
        };
        let records = self
            .database_client
            .sql_in_public(&sql)
            .await?
            .context(EmptyResultSnafu)?;
        let Value::String(create) = &records[0][1] else {
            unreachable!()
        };

        Ok(format!("{};\n", create))
    }

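    /// Writes a `create_database.sql` file containing the `SHOW CREATE DATABASE`
    /// output for each database selected for export.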
    async fn export_create_database(&self) -> Result<()> {
        let timer = Instant::now();
        let db_names = self.get_db_names().await?;
        let db_count = db_names.len();
        let operator = self.build_prefer_fs_operator().await?;

        for schema in db_names {
            let create_database = self
                .show_create("DATABASE", &self.catalog, &schema, None)
                .await?;

            let file_path = self.get_file_path(&schema, "create_database.sql");
            self.write_to_storage(&operator, &file_path, create_database.into_bytes())
                .await?;

            info!(
                "Exported {}.{} database creation SQL to {}",
                self.catalog,
                schema,
                self.storage_type.format_output_path(&file_path)
            );
        }

        let elapsed = timer.elapsed();
        info!("Success {db_count} jobs, cost: {elapsed:?}");

        Ok(())
    }

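    /// Writes a `create_tables.sql` file per database with the `SHOW CREATE`
    /// statements of all its tables and views, processing databases concurrently up to
    /// `export_jobs`.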
    async fn export_create_table(&self) -> Result<()> {
        let timer = Instant::now();
        let semaphore = Arc::new(Semaphore::new(self.export_jobs));
        let db_names = self.get_db_names().await?;
        let db_count = db_names.len();
        let operator = Arc::new(self.build_prefer_fs_operator().await?);
        let mut tasks = Vec::with_capacity(db_names.len());

        for schema in db_names {
            let semaphore_moved = semaphore.clone();
            let export_self = self.clone();
            let operator = operator.clone();
            tasks.push(async move {
                let _permit = semaphore_moved.acquire().await.unwrap();
                let (metric_physical_tables, remaining_tables, views) = export_self
                    .get_table_list(&export_self.catalog, &schema)
                    .await?;

                if !export_self.storage_type.is_remote_storage() {
                    let db_dir = format!("{}/{}/", export_self.catalog, schema);
                    operator.create_dir(&db_dir).await.context(OpenDalSnafu)?;
                }

                let file_path = export_self.get_file_path(&schema, "create_tables.sql");
                let mut content = Vec::new();

                for (c, s, t) in metric_physical_tables.iter().chain(&remaining_tables) {
                    let create_table = export_self.show_create("TABLE", c, s, Some(t)).await?;
                    content.extend_from_slice(create_table.as_bytes());
                }

                for (c, s, v) in &views {
                    let create_view = export_self.show_create("VIEW", c, s, Some(v)).await?;
                    content.extend_from_slice(create_view.as_bytes());
                }

                export_self
                    .write_to_storage(&operator, &file_path, content)
                    .await?;

                info!(
                    "Finished exporting {}.{schema} with {} table schemas to path: {}",
                    export_self.catalog,
                    metric_physical_tables.len() + remaining_tables.len() + views.len(),
                    export_self.storage_type.format_output_path(&file_path)
                );

                Ok::<(), Error>(())
            });
        }

        let success = self.execute_tasks(tasks).await;
        let elapsed = timer.elapsed();
        info!("Success {success}/{db_count} jobs, cost: {elapsed:?}");

        Ok(())
    }

    async fn build_operator(&self) -> Result<ObjectStore> {
        Ok(self.operator.clone())
    }

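    /// Returns a local filesystem operator rooted at `ddl_local_dir` when exporting to
    /// remote storage and a local DDL directory is configured; otherwise returns the
    /// configured operator.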
    async fn build_prefer_fs_operator(&self) -> Result<ObjectStore> {
        if self.storage_type.is_remote_storage() && self.ddl_local_dir.is_some() {
            let root = self.ddl_local_dir.as_ref().unwrap().clone();
            let op = new_fs_object_store(&root).map_err(|e| Error::Other {
                source: e,
                location: snafu::location!(),
            })?;
            Ok(op)
        } else {
            Ok(self.operator.clone())
        }
    }

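    /// Exports each database's data with `COPY DATABASE ... TO` and writes a companion
    /// `copy_from.sql` that can be used to re-import it.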
    async fn export_database_data(&self) -> Result<()> {
        let timer = Instant::now();
        let semaphore = Arc::new(Semaphore::new(self.export_jobs));
        let db_names = self.get_db_names().await?;
        let db_count = db_names.len();
        let mut tasks = Vec::with_capacity(db_count);
        let operator = Arc::new(self.build_operator().await?);
        let fs_first_operator = Arc::new(self.build_prefer_fs_operator().await?);
        let with_options = build_with_options(&self.start_time, &self.end_time, self.parallelism);

        for schema in db_names {
            let semaphore_moved = semaphore.clone();
            let export_self = self.clone();
            let with_options_clone = with_options.clone();
            let operator = operator.clone();
            let fs_first_operator = fs_first_operator.clone();

            tasks.push(async move {
                let _permit = semaphore_moved.acquire().await.unwrap();

                if !export_self.storage_type.is_remote_storage() {
                    let db_dir = format!("{}/{}/", export_self.catalog, schema);
                    operator.create_dir(&db_dir).await.context(OpenDalSnafu)?;
                }

                let (path, connection_part) = export_self
                    .storage_type
                    .get_storage_path(&export_self.catalog, &schema);

                let sql = format!(
                    r#"COPY DATABASE "{}"."{}" TO '{}' WITH ({}){};"#,
                    export_self.catalog, schema, path, with_options_clone, connection_part
                );

                let safe_sql = export_self.storage_type.mask_sensitive_info(&sql);
                info!("Executing sql: {}", safe_sql);

                export_self.database_client.sql_in_public(&sql).await?;
                info!(
                    "Finished exporting {}.{} data to {}",
                    export_self.catalog, schema, path
                );

                let copy_database_from_sql = {
                    let command_without_connection = format!(
                        r#"COPY DATABASE "{}"."{}" FROM '{}' WITH ({});"#,
                        export_self.catalog, schema, COPY_PATH_PLACEHOLDER, with_options_clone
                    );

                    if connection_part.is_empty() {
                        command_without_connection
                    } else {
                        let command_with_connection = format!(
                            r#"COPY DATABASE "{}"."{}" FROM '{}' WITH ({}){};"#,
                            export_self.catalog, schema, path, with_options_clone, connection_part
                        );

                        format!(
                            "-- {}\n{}",
                            command_with_connection, command_without_connection
                        )
                    }
                };

                let copy_from_path = export_self.get_file_path(&schema, "copy_from.sql");
                export_self
                    .write_to_storage(
                        &fs_first_operator,
                        &copy_from_path,
                        copy_database_from_sql.into_bytes(),
                    )
                    .await?;

                info!(
                    "Finished exporting {}.{} copy_from.sql to {}",
                    export_self.catalog,
                    schema,
                    export_self.storage_type.format_output_path(&copy_from_path)
                );

                Ok::<(), Error>(())
            });
        }

        let success = self.execute_tasks(tasks).await;
        let elapsed = timer.elapsed();
        info!("Success {success}/{db_count} jobs, costs: {elapsed:?}");

        Ok(())
    }

    fn get_file_path(&self, schema: &str, file_name: &str) -> String {
        format!("{}/{}/{}", self.catalog, schema, file_name)
    }

    async fn write_to_storage(
        &self,
        op: &ObjectStore,
        file_path: &str,
        content: Vec<u8>,
    ) -> Result<()> {
        op.write(file_path, content)
            .await
            .context(OpenDalSnafu)
            .map(|_| ())
    }

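    /// Awaits all export tasks and returns the number that succeeded; failures are
    /// logged and counted rather than propagated.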
    async fn execute_tasks(
        &self,
        tasks: Vec<impl std::future::Future<Output = Result<()>>>,
    ) -> usize {
        futures::future::join_all(tasks)
            .await
            .into_iter()
            .filter(|r| match r {
                Ok(_) => true,
                Err(e) => {
                    error!(e; "export job failed");
                    false
                }
            })
            .count()
    }
}

#[async_trait]
impl Tool for Export {
    async fn do_work(&self) -> std::result::Result<(), BoxedError> {
        match self.target {
            ExportTarget::Schema => {
                self.export_create_database()
                    .await
                    .map_err(BoxedError::new)?;
                self.export_create_table().await.map_err(BoxedError::new)
            }
            ExportTarget::Data => self.export_database_data().await.map_err(BoxedError::new),
            ExportTarget::All => {
                self.export_create_database()
                    .await
                    .map_err(BoxedError::new)?;
                self.export_create_table().await.map_err(BoxedError::new)?;
                self.export_database_data().await.map_err(BoxedError::new)
            }
        }
    }
}

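/// Builds the `WITH (...)` option list for `COPY DATABASE`: parquet format, optional
/// `start_time`/`end_time` bounds, and the copy parallelism.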
fn build_with_options(
    start_time: &Option<String>,
    end_time: &Option<String>,
    parallelism: usize,
) -> String {
    let mut options = vec!["format = 'parquet'".to_string()];
    if let Some(start) = start_time {
        options.push(format!("start_time = '{}'", start));
    }
    if let Some(end) = end_time {
        options.push(format!("end_time = '{}'", end));
    }
    options.push(format!("parallelism = {}", parallelism));
    options.join(", ")
}

#[cfg(test)]
mod tests {
    use clap::Parser;
    use common_test_util::temp_dir::create_temp_dir;

    use super::*;

    #[tokio::test]
    async fn test_export_command_build_with_local_fs() {
        let temp_dir = create_temp_dir("test_export_local_fs");
        let output_dir = temp_dir.path().to_str().unwrap();

        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--output-dir",
            output_dir,
        ]);

        let result = cmd.build().await;
        assert!(result.is_ok());
    }

    #[tokio::test]
    async fn test_export_command_build_with_s3_success() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--s3",
            "--s3-bucket",
            "test-bucket",
            "--s3-root",
            "test-root",
            "--s3-access-key-id",
            "test-key",
            "--s3-secret-access-key",
            "test-secret",
            "--s3-region",
            "us-west-2",
            "--s3-endpoint",
            "https://s3.amazonaws.com",
        ]);

        let result = cmd.build().await;
        assert!(result.is_ok());
    }

    #[tokio::test]
    async fn test_export_command_build_with_oss_success() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--oss",
            "--oss-bucket",
            "test-bucket",
            "--oss-root",
            "test-root",
            "--oss-access-key-id",
            "test-key-id",
            "--oss-access-key-secret",
            "test-secret",
            "--oss-endpoint",
            "https://oss.example.com",
        ]);

        let result = cmd.build().await;
        assert!(result.is_ok());
    }

    #[tokio::test]
    async fn test_export_command_build_with_gcs_success() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--gcs",
            "--gcs-bucket",
            "test-bucket",
            "--gcs-root",
            "test-root",
            "--gcs-scope",
            "test-scope",
            "--gcs-credential-path",
            "/path/to/credential",
            "--gcs-credential",
            "test-credential-content",
            "--gcs-endpoint",
            "https://storage.googleapis.com",
        ]);

        let result = cmd.build().await;
        assert!(result.is_ok());
    }

    #[tokio::test]
    async fn test_export_command_build_with_gcs_adc_success() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--gcs",
            "--gcs-bucket",
            "test-bucket",
            "--gcs-root",
            "test-root",
            "--gcs-scope",
            "test-scope",
        ]);

        let result = cmd.build().await;
        assert!(result.is_ok());
    }

    #[tokio::test]
    async fn test_export_command_build_with_azblob_success() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--azblob",
            "--azblob-container",
            "test-container",
            "--azblob-root",
            "test-root",
            "--azblob-account-name",
            "test-account",
            "--azblob-account-key",
            "test-key",
            "--azblob-endpoint",
            "https://account.blob.core.windows.net",
        ]);

        let result = cmd.build().await;
        assert!(result.is_ok());
    }

    #[tokio::test]
    async fn test_export_command_build_with_azblob_with_sas_token() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--azblob",
            "--azblob-container",
            "test-container",
            "--azblob-root",
            "test-root",
            "--azblob-account-name",
            "test-account",
            "--azblob-account-key",
            "test-key",
            "--azblob-endpoint",
            "https://account.blob.core.windows.net",
            "--azblob-sas-token",
            "test-sas-token",
        ]);

        let result = cmd.build().await;
        assert!(result.is_ok());
    }

    #[test]
    fn test_export_command_build_with_conflict() {
        let result =
            ExportCommand::try_parse_from(["export", "--addr", "127.0.0.1:4000", "--s3", "--oss"]);

        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.kind() == clap::error::ErrorKind::ArgumentConflict);
    }

    #[tokio::test]
    async fn test_export_command_build_with_s3_no_enable_flag() {
        let result = ExportCommand::try_parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--s3-bucket",
            "test-bucket",
            "--s3-access-key-id",
            "test-key",
            "--output-dir",
            "/tmp/test",
        ]);

        assert!(result.is_err());
        let err = result.unwrap_err();
        assert_eq!(err.kind(), clap::error::ErrorKind::MissingRequiredArgument);
        assert!(err.to_string().contains("--s3"));
    }

    #[tokio::test]
    async fn test_export_command_build_with_oss_no_enable_flag() {
        let result = ExportCommand::try_parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--oss-bucket",
            "test-bucket",
            "--output-dir",
            "/tmp/test",
        ]);

        assert!(result.is_err());
        let err = result.unwrap_err();
        assert_eq!(err.kind(), clap::error::ErrorKind::MissingRequiredArgument);
        assert!(err.to_string().contains("--oss"));
    }

    #[tokio::test]
    async fn test_export_command_build_with_gcs_no_enable_flag() {
        let result = ExportCommand::try_parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--gcs-bucket",
            "test-bucket",
            "--output-dir",
            "/tmp/test",
        ]);

        assert!(result.is_err());
        let err = result.unwrap_err();
        assert_eq!(err.kind(), clap::error::ErrorKind::MissingRequiredArgument);
        assert!(err.to_string().contains("--gcs"));
    }

    #[tokio::test]
    async fn test_export_command_build_with_azblob_no_enable_flag() {
        let result = ExportCommand::try_parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--azblob-container",
            "test-container",
            "--output-dir",
            "/tmp/test",
        ]);

        assert!(result.is_err());
        let err = result.unwrap_err();
        assert_eq!(err.kind(), clap::error::ErrorKind::MissingRequiredArgument);
        assert!(err.to_string().contains("--azblob"));
    }

    #[tokio::test]
    async fn test_export_command_build_with_s3_empty_root() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--s3",
            "--s3-bucket",
            "test-bucket",
            "--s3-root",
            "",
            "--s3-access-key-id",
            "test-key",
            "--s3-secret-access-key",
            "test-secret",
            "--s3-region",
            "us-west-2",
        ]);

        let result = cmd.build().await;
        assert!(
            result.is_ok(),
            "Expected success but got: {:?}",
            result.err()
        );
    }

    #[tokio::test]
    async fn test_export_command_build_with_oss_empty_access_key_id() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--oss",
            "--oss-bucket",
            "test-bucket",
            "--oss-access-key-id",
            "",
            "--oss-access-key-secret",
            "test-secret",
            "--oss-endpoint",
            "https://oss.example.com",
        ]);

        let result = cmd.build().await;
        assert!(result.is_err());
        if let Err(err) = result {
            assert!(
                err.to_string().contains("OSS access key ID must be set"),
                "Actual error: {}",
                err
            );
        }
    }

    #[tokio::test]
    async fn test_export_command_build_with_oss_missing_endpoint() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--oss",
            "--oss-bucket",
            "test-bucket",
            "--oss-root",
            "test-root",
            "--oss-access-key-id",
            "test-key-id",
            "--oss-access-key-secret",
            "test-secret",
        ]);

        let result = cmd.build().await;
        assert!(result.is_err());
        if let Err(err) = result {
            assert!(
                err.to_string().contains("OSS endpoint must be set"),
                "Actual error: {}",
                err
            );
        }
    }

    #[tokio::test]
    async fn test_export_command_build_with_oss_multiple_missing_fields() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--oss",
            "--oss-bucket",
            "test-bucket",
        ]);

        let result = cmd.build().await;
        assert!(result.is_err());
        if let Err(err) = result {
            let err_str = err.to_string();
            assert!(
                err_str.contains("OSS"),
                "Error should mention OSS: {}",
                err_str
            );
            assert!(
                err_str.contains("must be set"),
                "Error should mention required fields: {}",
                err_str
            );
        }
    }

    #[tokio::test]
    async fn test_export_command_build_with_gcs_empty_bucket() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--gcs",
            "--gcs-bucket",
            "",
            "--gcs-root",
            "test-root",
            "--gcs-scope",
            "test-scope",
        ]);

        let result = cmd.build().await;
        assert!(result.is_err());
        if let Err(err) = result {
            assert!(
                err.to_string().contains("GCS bucket must be set"),
                "Actual error: {}",
                err
            );
        }
    }

    #[tokio::test]
    async fn test_export_command_build_with_gcs_empty_root() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--gcs",
            "--gcs-bucket",
            "test-bucket",
            "--gcs-root",
            "",
            "--gcs-scope",
            "test-scope",
            "--gcs-credential-path",
            "/path/to/credential",
            "--gcs-credential",
            "test-credential",
            "--gcs-endpoint",
            "https://storage.googleapis.com",
        ]);

        let result = cmd.build().await;
        assert!(result.is_err());
        if let Err(err) = result {
            assert!(
                err.to_string().contains("GCS root must be set"),
                "Actual error: {}",
                err
            );
        }
    }

    #[tokio::test]
    async fn test_export_command_build_with_azblob_empty_account_name() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--azblob",
            "--azblob-container",
            "test-container",
            "--azblob-root",
            "test-root",
            "--azblob-account-name",
            "",
            "--azblob-account-key",
            "test-key",
            "--azblob-endpoint",
            "https://account.blob.core.windows.net",
        ]);

        let result = cmd.build().await;
        assert!(result.is_err());
        if let Err(err) = result {
            assert!(
                err.to_string().contains("AzBlob account name must be set"),
                "Actual error: {}",
                err
            );
        }
    }

    #[tokio::test]
    async fn test_export_command_build_with_azblob_missing_account_key() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--azblob",
            "--azblob-container",
            "test-container",
            "--azblob-root",
            "test-root",
            "--azblob-account-name",
            "test-account",
            "--azblob-endpoint",
            "https://account.blob.core.windows.net",
        ]);

        let result = cmd.build().await;
        assert!(result.is_err());
        if let Err(err) = result {
            assert!(
                err.to_string()
                    .contains("AzBlob account key (when sas_token is not provided) must be set"),
                "Actual error: {}",
                err
            );
        }
    }

    #[tokio::test]
    async fn test_export_command_build_with_no_storage() {
        let cmd = ExportCommand::parse_from(["export", "--addr", "127.0.0.1:4000"]);

        let result = cmd.build().await;
        assert!(result.is_err());
        if let Err(err) = result {
            assert!(
                err.to_string().contains("Output directory not set"),
                "Actual error: {}",
                err
            );
        }
    }

    #[tokio::test]
    async fn test_export_command_build_with_s3_minimal_config() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--s3",
            "--s3-bucket",
            "test-bucket",
            "--s3-access-key-id",
            "test-key",
            "--s3-secret-access-key",
            "test-secret",
            "--s3-region",
            "us-west-2",
        ]);

        let result = cmd.build().await;
        assert!(result.is_ok(), "Minimal S3 config should succeed");
    }

    #[tokio::test]
    async fn test_export_command_build_with_oss_minimal_config() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--oss",
            "--oss-bucket",
            "test-bucket",
            "--oss-access-key-id",
            "test-key-id",
            "--oss-access-key-secret",
            "test-secret",
            "--oss-endpoint",
            "https://oss.example.com",
        ]);

        let result = cmd.build().await;
        assert!(result.is_ok(), "Minimal OSS config should succeed");
    }

    #[tokio::test]
    async fn test_export_command_build_with_gcs_minimal_config() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--gcs",
            "--gcs-bucket",
            "test-bucket",
            "--gcs-root",
            "test-root",
            "--gcs-scope",
            "test-scope",
        ]);

        let result = cmd.build().await;
        assert!(result.is_ok(), "Minimal GCS config should succeed");
    }

    #[tokio::test]
    async fn test_export_command_build_with_azblob_minimal_config() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--azblob",
            "--azblob-container",
            "test-container",
            "--azblob-root",
            "test-root",
            "--azblob-account-name",
            "test-account",
            "--azblob-account-key",
            "test-key",
            "--azblob-endpoint",
            "https://account.blob.core.windows.net",
        ]);

        let result = cmd.build().await;
        assert!(result.is_ok(), "Minimal AzBlob config should succeed");
    }

    #[tokio::test]
    async fn test_export_command_build_with_local_and_s3() {
        let temp_dir = create_temp_dir("test_export_local_and_s3");
        let output_dir = temp_dir.path().to_str().unwrap();

        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--output-dir",
            output_dir,
            "--s3",
            "--s3-bucket",
            "test-bucket",
            "--s3-access-key-id",
            "test-key",
            "--s3-secret-access-key",
            "test-secret",
            "--s3-region",
            "us-west-2",
        ]);

        let result = cmd.build().await;
        assert!(
            result.is_ok(),
            "S3 should be selected when both are provided"
        );
    }

    #[tokio::test]
    async fn test_export_command_build_with_azblob_only_sas_token() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--azblob",
            "--azblob-container",
            "test-container",
            "--azblob-root",
            "test-root",
            "--azblob-account-name",
            "test-account",
            "--azblob-endpoint",
            "https://account.blob.core.windows.net",
            "--azblob-sas-token",
            "test-sas-token",
        ]);

        let result = cmd.build().await;
        assert!(
            result.is_ok(),
            "AzBlob with only sas_token should succeed: {:?}",
            result.err()
        );
    }

    #[tokio::test]
    async fn test_export_command_build_with_azblob_empty_account_key_with_sas() {
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--azblob",
            "--azblob-container",
            "test-container",
            "--azblob-root",
            "test-root",
            "--azblob-account-name",
            "test-account",
            "--azblob-account-key",
            "",
            "--azblob-endpoint",
            "https://account.blob.core.windows.net",
            "--azblob-sas-token",
            "test-sas-token",
        ]);

        let result = cmd.build().await;
        assert!(
            result.is_ok(),
            "AzBlob with empty account_key but sas_token should succeed: {:?}",
            result.err()
        );
    }
}