use clap::Args;
use futures::{stream, StreamExt, TryStreamExt};
use graphql_client::GraphQLQuery;
use indicatif::{HumanBytes, MultiProgress, ProgressBar};
use serde::Serialize;
use std::path::PathBuf;
use url::Url;

use crate::{
    commands::{
        dataset::{
            common::{get_dataset_by_slug, DatasetCommonArgs},
            download::get_dataset_version_files::GetDatasetVersionFilesNodeOnDatasetVersionFilesNodes,
            version::common::get_dataset_version,
        },
        GlobalArgs,
    },
    download::{multipart_download, MultipartOptions},
    error::{self, Result},
};

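/// Arguments for downloading a dataset version.
///
/// A hypothetical invocation, assuming this is wired up as `aqora dataset
/// download` and that `DatasetCommonArgs` accepts an `owner/name` slug:
///
/// ```text
/// aqora dataset download owner/name --version 1.0.0 --destination ./data
/// ```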
#[derive(Args, Debug, Serialize)]
pub struct Download {
    #[command(flatten)]
    common: DatasetCommonArgs,
    /// Dataset version to download (e.g. 1.2.3)
    #[arg(short, long)]
    version: semver::Version,
    /// Directory to download the dataset into
    #[arg(short, long)]
    destination: PathBuf,
    /// Size of each download chunk in bytes
    #[arg(long, short = 'c', default_value_t = 10_000_000)]
    chunk_size: usize,
    /// Maximum number of chunks downloaded concurrently per file
    #[arg(long, default_value_t = 10)]
    part_download_concurrency: usize,
    /// Maximum number of files downloaded concurrently
    #[arg(long, default_value_t = 10)]
    file_download_concurrency: usize,
}

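/// GraphQL query listing the files of a dataset version. The query document
/// lives at `src/graphql/get_dataset_version_files.graphql` (not shown in this
/// diff); judging from the fields used below, it presumably looks something
/// like:
///
/// ```graphql
/// query GetDatasetVersionFiles($datasetVersionId: ID!) {
///   node(id: $datasetVersionId) {
///     ... on DatasetVersion {
///       dataset {
///         name
///       }
///       files {
///         nodes {
///           url
///           partitionNum
///         }
///       }
///     }
///   }
/// }
/// ```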
#[derive(GraphQLQuery)]
#[graphql(
    query_path = "src/graphql/get_dataset_version_files.graphql",
    schema_path = "schema.graphql",
    response_derives = "Debug"
)]
pub struct GetDatasetVersionFiles;

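/// Entry point for the `download` subcommand: resolves the dataset and
/// version, lists the version's partition files, and downloads them
/// concurrently into `destination/<dataset name>/`.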
pub async fn download(args: Download, global: GlobalArgs) -> Result<()> {
    let m = MultiProgress::new();

    let client = global.graphql_client().await?;

    let (owner, local_slug) = args.common.slug_pair()?;
    let multipart_options = MultipartOptions::new(args.chunk_size, args.part_download_concurrency);

    let dataset = get_dataset_by_slug(&global, owner, local_slug).await?;
    if !dataset.viewer_can_read_dataset_version_file {
        return Err(error::user(
            "Permission denied",
            "You do not have permission to read this dataset's files",
        ));
    }

    let dataset_version = get_dataset_version(
        &client,
        dataset.id,
        args.version.major as _,
        args.version.minor as _,
        args.version.patch as _,
    )
    .await?
    .ok_or_else(|| error::user("Not found", "Dataset version not found"))?;

    let response = client
        .send::<GetDatasetVersionFiles>(get_dataset_version_files::Variables {
            dataset_version_id: dataset_version.id,
        })
        .await?;

    let dataset_version_files = match response.node {
        get_dataset_version_files::GetDatasetVersionFilesNode::DatasetVersion(v) => v,
        _ => {
            return Err(error::system(
                "Unexpected GraphQL response",
                "Expected a DatasetVersion node",
            ))
        }
    };

    let nodes = dataset_version_files.files.nodes;
    let dataset_name = dataset_version_files.dataset.name;

    let dataset_dir = args.destination.join(&dataset_name);
    tokio::fs::create_dir_all(&dataset_dir).await?;

    let total_size = dataset_version.size as u64;
    let total_files = nodes.len();

    let overall_progress = m.add(global.spinner().with_message(format!(
        "Downloading '{}' ({} files, {})",
        dataset_name,
        total_files,
        HumanBytes(total_size)
    )));

    // Download files concurrently, bounded by --file-download-concurrency.
    // Shared state is captured by reference so each task can borrow it
    // without cloning per file.
    stream::iter(nodes)
        .map(|node| {
            let client = &client;
            let m = &m;
            let multipart_options = &multipart_options;
            let dataset_dir = &dataset_dir;
            let dataset_name = &dataset_name;

            async move {
                download_partition_file(m, client, multipart_options, dataset_dir, dataset_name, node)
                    .await
            }
        })
        .buffer_unordered(args.file_download_concurrency)
        .try_collect::<()>()
        .await?;

    overall_progress.finish_with_message("Done");

    Ok(())
}

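/// Download a single partition file to
/// `output_dir/<dataset_name>-<partition_num>.parquet`.
///
/// Files whose size already matches the remote object are skipped, so
/// rerunning an interrupted download resumes at the file level. Data is
/// written to a temporary file first and only renamed into place once the
/// download completes.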
async fn download_partition_file(
    m: &MultiProgress,
    client: &aqora_client::Client,
    multipart_options: &MultipartOptions,
    output_dir: &std::path::Path,
    dataset_name: &str,
    file_node: GetDatasetVersionFilesNodeOnDatasetVersionFilesNodes,
) -> Result<()> {
    // Fetch the object's metadata up front: its size is needed both for the
    // skip check below and to plan the multipart download.
    let metadata = client.s3_head(file_node.url.clone()).await?;
    let filename = format!("{}-{}.parquet", dataset_name, file_node.partition_num);
    let output_path = output_dir.join(&filename);

    // Skip files that already appear fully downloaded.
    if let Ok(existing) = tokio::fs::metadata(&output_path).await {
        if existing.len() == metadata.size {
            return Ok(());
        }
    }

    tokio::fs::create_dir_all(output_dir).await?;

    // Download into a temporary file in the same directory so the final
    // rename below stays on one filesystem.
    let temp = tempfile::NamedTempFile::new_in(output_dir)?;
    let temp_path = temp.path().to_owned();

    let pb = m.add(ProgressBar::new_spinner());
    pb.set_message(filename);

    multipart_download(
        client,
        metadata.size,
        file_node.url.clone(),
        multipart_options,
        &temp_path,
        &pb,
    )
    .await?;

    pb.finish_and_clear();
    // Move the completed download into place; a partially downloaded file is
    // never left at the final path.
    tokio::fs::rename(&temp_path, &output_path).await?;

    Ok(())
}