use std::collections::HashSet;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::string::String;
use std::vec::Vec;

use crate::scrapper;
/// An ordered list of video URLs, loaded from and saved back to
/// extended-M3U playlist files.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct Playlist {
    /// URLs in playlist order; may contain duplicates until
    /// [`Playlist::remove_duplicities`] is called.
    pub urls: Vec<String>
}
impl Playlist {
// fn new(&mut self) -> Result<Vec<String>, io::Error> {
pub fn try_from(path: &PathBuf) -> Result<Self, io::Error> {
// Open and load the file.
let f = File::open(path)?;
let reader = io::BufReader::new(f);
// Read lines and filter the URLs.
let urls = reader.lines()
.filter_map(|line| {
let line = line.ok().unwrap();
if !line.starts_with('#') && line.starts_with("http") {
return Some(line.trim().to_string());
}
None
})
.collect::<Vec<String>>();
// Return new instance.
Ok(Playlist {
urls,
})
}
pub fn save(&self, target: PathBuf, videos: &[scrapper::Video]) -> Result<(), io::Error> {
fn format_title(video: &scrapper::Video) -> String {
format!(
"({} | {}) - {}",
video.channel, video.published, video.title.content,
)
}
let mut f = File::create(target)?;
// Write header.
f.write_all(b"# Created by YTitler\n")?;
f.write_all(b"# See: https://gitlab.com/n1_/ytitler\n\n")?;
f.write_all(b"#EXTM3U\n")?;
// Write content.
for v in videos {
f.write_fmt(format_args!(
"#EXTINF:{},{}\n",
v.duration.as_secs(),
format_title(v)
))?;
f.write_fmt(format_args!("{}\n", v.url))?;
}
f.sync_all()?;
Ok(())
}
pub fn remove_duplicities(&mut self) {
// Walks thru vector and with help of
// hash set determines which items are
// already in the vector and which are
// not. Preserves order of the vector
// items.
let mut set = HashSet::new();
self.urls.retain(|i| set.insert(i.clone()));
}
pub fn to_chunks(&self) -> Vec<Vec<String>> {
self.urls.chunks(10).map(|s| s.to_vec()).collect()
}
// let mut chunks: Vec<Vec<String>> = vec![];
// const LIMIT: f32 = 10.0;
// let steps = ((self.urls.len() as f32 / LIMIT) as f32).ceil() as i32;
// let mut i = 0;
// // Walk thru vector and grab LIMIT or the rest of
// // items and put them into chunks under a separated
// // vector.
// while i < steps {
// let to_drain = if urls.len() >= LIMIT as usize {
// LIMIT as usize
// } else {
// urls.len()
// };
// chunks.push(urls.drain(0..to_drain).collect());
// i += 1;
// info!("A chunk: {}/{}", i, steps);
// }
// chunks
// }
}