// bitslides/bitslideslib/mod.rs
use anyhow::{bail, Result};
2use chrono::prelude::*;
3use config::GlobalConfig;
4use fs::MoveRequest;
5use slide::Slide;
6use std::{
7 collections::HashMap,
8 path::{Path, PathBuf},
9};
10use syncjob::{SyncJob, SyncJobs};
11use tokio::{
12 fs::OpenOptions,
13 io::AsyncWriteExt,
14 sync::mpsc::{self, Sender},
15};
16use volume::Volume;
17
18#[cfg(target_os = "windows")]
19use std::ffi::CStr;
20
21pub mod config;
22mod fs;
23mod slide;
24mod syncjob;
25mod volume;
26
27const DEFAULT_SLIDE_CONFIG_FILE: &str = ".slide.yml";
28
/// Entry point of the sync engine: discovers volumes for every rootset in
/// `config`, derives the sync jobs, optionally opens a trace log, and runs
/// all jobs.
///
/// # Errors
///
/// Fails if the trace file cannot be opened, a destination slide folder
/// cannot be created, or any sync job fails.
pub async fn slide(config: GlobalConfig) -> Result<()> {
    log::debug!("Config: {config:#?}");

    // Handle of the background task that writes trace messages to disk.
    let mut tracer = None;

    let mut volumes = HashMap::new();

    // Discovery errors for one rootset do not abort the others.
    for rootset_config in config.rootsets {
        let some_volumes = identify_env(&rootset_config.keyword, &rootset_config.roots);
        match some_volumes {
            Ok(v) => volumes.extend(v),
            Err(_) => log::warn!("Error processing some volumes"),
        }
    }

    log::debug!("Volumes for all configs: {volumes:#?}");

    let syncjobs = build_syncjobs(&mut volumes)?;

    log::debug!("Sync jobs: {syncjobs:#?}");

    // When tracing is enabled, spawn a writer task fed through a channel:
    // `Some(msg)` is written with a local timestamp, `None` writes nothing
    // (used as an end-of-run marker by execute_syncjobs).
    let trace = match config.trace {
        Some(trace) => {
            let mut file = OpenOptions::new()
                .create(true)
                .append(true)
                .open(trace)
                .await?;
            let (tx, mut rx) = mpsc::channel::<Option<String>>(32);
            tracer = Some(tokio::spawn(async move {
                // Runs until every Sender clone is dropped.
                while let Some(message) = rx.recv().await {
                    let message = match message {
                        Some(m) => {
                            format!("[{}] {}\n", Local::now().format("%Y-%m-%d %H:%M:%S"), m)
                        }
                        None => "".to_owned(),
                    };
                    // Trace writing is best-effort; I/O errors are ignored.
                    let _ = file.write_all(message.as_bytes()).await;
                }
            }));
            Some(tx)
        }
        None => None,
    };

    let move_req = MoveRequest {
        collision: config.collision,
        safe: false,
        check: config.check,
        retries: 5,
    };

    // `trace` (the only Sender) is moved in and dropped inside, which lets
    // the writer task terminate once all messages are flushed.
    let result = execute_syncjobs(&volumes, &syncjobs, config.dry_run, trace, &move_req).await;

    // Wait for the trace writer to finish before reporting the outcome.
    if let Some(tracer) = tracer {
        tracer.await?;
    }

    result
}
96
/// Planned cleanup pass; not implemented yet.
///
/// # Panics
///
/// Always panics via `unimplemented!`.
pub async fn tidy_up() {
    unimplemented!();
}
120
121fn identify_volumes(root: &Path, keyword: &str) -> Result<HashMap<String, Volume>> {
127 let mut volumes = HashMap::new();
128
129 if !root.is_dir() {
131 bail!("{} is not a folder", root.to_string_lossy());
132 }
133
134 let entries = root.read_dir();
135 if entries.is_err() {
136 bail!("{} cannot be read", root.to_string_lossy());
137 }
138
139 for entry in entries?.flatten() {
141 let file_type = entry.file_type();
142 if let Ok(file_type) = file_type {
143 if file_type.is_dir() {
144 if let Some(volume) = Volume::from_path(entry.path(), keyword) {
145 volumes.insert(volume.name.clone(), volume);
146 }
147 }
148 }
149 }
150
151 Ok(volumes)
152}
153
154fn identify_slides(volume: &mut Volume) -> Result<()> {
159 let subfolders = volume.path.join(&volume.keyword).read_dir();
160
161 if subfolders.is_err() {
162 bail!("Unable to read the folder: {volume:?}");
163 }
164
165 for entry in subfolders?.flatten() {
166 let entry_metadata = entry.metadata();
167 if entry_metadata.is_err() {
168 continue;
169 }
170 if entry_metadata.unwrap().is_dir() {
171 let entry_fullpath = entry.path();
172 let entry_name = entry_fullpath
173 .file_name()
174 .unwrap()
175 .to_string_lossy()
176 .to_string();
177
178 let slide_conf = {
180 let slide_conf =
181 config::read_slide_config(entry_fullpath.join(DEFAULT_SLIDE_CONFIG_FILE));
182 match slide_conf {
183 Ok(s) => s.route,
184 Err(_) => None,
185 }
186 };
187
188 volume.add_slide(Slide::new(entry_name, entry_fullpath, slide_conf));
189 }
190 }
191
192 Ok(())
193}
194
195pub fn identify_env(keyword: &str, roots: &[PathBuf]) -> Result<HashMap<String, Volume>> {
200 let mut volumes: HashMap<String, Volume> = HashMap::new();
201
202 {
204 for root in roots {
206 match identify_volumes(root, keyword) {
207 Ok(v) => volumes.extend(v),
208 Err(e) => log::warn!("{e}"),
209 }
210 }
211
212 #[cfg(target_os = "windows")]
214 {
215 let drives = {
217 let mut result = Vec::new();
218 const MAX_BUF: usize = 1024;
219 let mut buf = [0u8; MAX_BUF];
220 let length = unsafe {
221 windows::Win32::Storage::FileSystem::GetLogicalDriveStringsA(Some(&mut buf))
222 } as usize;
223 if length > MAX_BUF {
224 log::error!(
225 "The hardcoded buffer is not big enough to retrieve all logical drives"
226 );
227 }
228 let mut ptr = 0;
229 while ptr < length {
230 let drive = CStr::from_bytes_until_nul(&buf[ptr..]).unwrap();
231 let offset_to_next = 1 + drive.count_bytes();
232 ptr += offset_to_next;
233 result.push(PathBuf::from(drive.to_str().unwrap()));
234 }
235 result
236 };
237
238 for drive in drives {
239 if let Some(volume) = Volume::from_path(drive, keyword) {
240 volumes.insert(volume.name.clone(), volume);
241 }
242 }
243 }
244 }
245
246 for (_, volume) in volumes.iter_mut() {
248 match identify_slides(volume) {
249 Ok(_) => {}
250 Err(e) => log::warn!("{e}"),
251 }
252 }
253
254 Ok(volumes)
255}
256
257fn build_syncjobs(volumes: &mut HashMap<String, Volume>) -> Result<SyncJobs> {
262 let mut syncjobs = Vec::new();
263
264 for src_name in volumes.keys() {
265 for (dst_name, slide) in &volumes[src_name].slides {
266 if src_name == dst_name {
267 continue;
268 }
269
270 if volumes.contains_key(dst_name) {
272 syncjobs.push(SyncJob {
273 src: src_name.to_owned(),
274 dst: dst_name.to_owned(),
275 issue: dst_name.to_owned(),
276 });
277 continue;
278 }
279
280 match &slide.or_else {
281 Some(default_route) => {
283 if volumes.contains_key(default_route) {
284 syncjobs.push(SyncJob {
285 src: src_name.to_owned(),
286 dst: default_route.to_owned(),
287 issue: dst_name.to_owned(),
288 });
289 continue;
290 }
291 log::info!("default_route {default_route} not available");
292 }
293 _ => {
294 log::info!("{dst_name} not available and no default route");
295 }
296 }
297 }
298 }
299
300 for syncjob in &syncjobs {
302 if !volumes[&syncjob.dst].slides.contains_key(&syncjob.issue) {
303 volumes
304 .get_mut(&syncjob.dst)
305 .unwrap()
306 .create_slide(&syncjob.issue)?;
307 }
308 }
309
310 Ok(syncjobs)
311}
312
/// Runs every sync job concurrently, one tokio task per job, and waits for
/// all of them to finish.
///
/// `tracer`, when present, receives progress messages; a final `None` is sent
/// once all jobs completed (the trace writer treats it as an empty record).
///
/// # Errors
///
/// Fails if the trace channel is closed, a spawned task panics, or any
/// individual sync fails.
async fn execute_syncjobs(
    volumes: &HashMap<String, Volume>,
    syncjobs: &SyncJobs,
    dry_run: bool,
    tracer: Option<Sender<Option<String>>>,
    move_req: &MoveRequest,
) -> Result<()> {
    let mut handles = Vec::new();

    if let Some(tracer) = &tracer {
        tracer
            .send(Some("Starting slides sync...".to_owned()))
            .await?;
    }

    {
        for syncjob in syncjobs {
            log::debug!("Syncing {:?}", syncjob);
            // Spawned tasks need 'static data, so clone everything they capture.
            let syncjob = syncjob.clone();
            let src = volumes[&syncjob.src].slides[&syncjob.issue].path.clone();
            let dst = volumes[&syncjob.dst].slides[&syncjob.issue].path.clone();
            let trace = tracer.clone();
            let move_req = move_req.clone();
            let handle = tokio::spawn(async move {
                if let Err(e) = sync_slide(&syncjob, &src, &dst, dry_run, trace, &move_req).await {
                    bail!("Error syncing {:?} -> {:?}: {:?}", src, dst, e);
                }
                Ok(())
            });
            handles.push(handle);
        }

        // First `?` surfaces task panics/cancellation, second surfaces the
        // job's own error; the first failure aborts the function.
        for handle in handles {
            handle.await??;
        }
    }

    // Signal completion to the trace writer.
    if let Some(tracer) = &tracer {
        tracer.send(None).await?;
    }

    Ok(())
}
361
362async fn sync_slide(
365 syncjob: &SyncJob,
366 src: &PathBuf,
367 dst: &Path,
368 dry_run: bool,
369 tracer: Option<Sender<Option<String>>>,
370 move_req: &MoveRequest,
371) -> Result<()> {
372 log::info!("Syncing {:?}", syncjob);
373
374 let tracer = tracer.map(|t| (t, syncjob));
375
376 let entries = src.read_dir();
377 if entries.is_err() {
378 bail!("{src:?} cannot be read");
379 }
380
381 for entry in entries?.flatten() {
382 let entry_path = entry.path();
383 let file_type = entry.file_type();
384 if let Ok(file_type) = file_type {
385 if !file_type.is_dir() {
387 log::warn!("{} is not a directory", entry_path.display());
388 continue;
389 }
390 let dst = dst.join(entry.file_name());
391 fs::sync(&entry_path, &dst, dry_run, &tracer, move_req).await?;
392 }
393 }
394
395 Ok(())
396}
397
398#[cfg(test)]
399mod tests;