use std::collections::hash_map::DefaultHasher;
use std::fs::File;
use std::hash::{Hash, Hasher};
use std::io::{BufReader, Read, Write};
use std::path::Path;
use std::sync::Arc;
use std::sync::Mutex;
use std::thread;
use std::time::Duration;
/// Reads the database contents from disk, preferring the master file and
/// falling back to the backup copy if the master is missing or unreadable.
fn read_from_file<A: serde::de::DeserializeOwned>(database_name: &'static str) -> Vec<A> {
    let master_path = format!("./SixthDatabase/{}.6db", database_name);
    let backup_path = format!("./SixthDatabase/{}.6db.bak", database_name);
    let (master_file, backup_file) = (Path::new(&master_path), Path::new(&backup_path));

    // A brand-new database: neither file exists yet, so start empty.
    if !master_file.exists() && !backup_file.exists() {
        return vec![];
    }

    // Try the master file first.
    if master_file.exists() {
        let file = File::open(master_file).expect("Could not open master file for 6DB");
        let mut bytes: Vec<u8> = vec![];
        let read_result = BufReader::new(file).read_to_end(&mut bytes);
        if read_result.is_ok() && !bytes.is_empty() {
            if let Ok(data) = bincode::deserialize(&bytes) {
                return data;
            }
        }
    }

    // The master was missing or corrupt; fall back to the backup copy.
    if backup_file.exists() {
        let file = File::open(backup_file).expect("Could not open backup file for 6DB");
        let mut bytes: Vec<u8> = vec![];
        let read_result = BufReader::new(file).read_to_end(&mut bytes);
        if read_result.is_ok() && !bytes.is_empty() {
            if let Ok(data) = bincode::deserialize(&bytes) {
                return data;
            }
        }
    }

    panic!("Both master and backup database files are corrupt. To prevent erasure of data, delete your files manually if you intended this to happen!");
}
/// Serializes `data` with bincode and writes it to both the master file and
/// the backup file, creating the ./SixthDatabase/ directory if needed.
fn write_to_file<T>(data: &T, database_name: &'static str)
where
    T: serde::ser::Serialize,
{
    let master_path = format!("./SixthDatabase/{}.6db", database_name);
    let backup_path = format!("./SixthDatabase/{}.6db.bak", database_name);
    let (master_file, backup_file) = (Path::new(&master_path), Path::new(&backup_path));

    // create_dir_all is a no-op when the directory already exists.
    std::fs::create_dir_all("./SixthDatabase/").expect("Could not create SixthDatabase directory");

    let serialized = bincode::serialize(data).expect("serialization failed");
    {
        let mut f = File::create(master_file).expect("Error opening master file");
        f.write_all(&serialized).expect("Could not write serialized data to master_file");
    }
    {
        let mut f = File::create(backup_file).expect("Error opening backup file");
        f.write_all(&serialized).expect("Could not write serialized data to backup_file");
    }
}
/// Spawns the background saving thread. Every 15 seconds it hashes the
/// in-memory data and writes it to disk only when the hash has changed.
fn make_thread<A: 'static>(instance: &Arc<Mutex<Database<A>>>)
where
    A: Send + serde::de::DeserializeOwned + serde::ser::Serialize + Hash,
{
    let reference = instance.clone();
    let handle = thread::spawn(move || loop {
        {
            let mut lock = reference.lock().expect("Failed to obtain 6db lock in saving thread");
            let current_hash = hashme(&lock.data);
            if current_hash != lock.old_hash {
                lock.old_hash = current_hash;
                write_to_file(&lock.data, lock.database_name);
            }
            if lock.shutdown {
                break;
            }
            // The guard is dropped at the end of this block so other threads
            // can use the database while this one sleeps.
        }
        thread::sleep(Duration::from_secs(15));
    });
    instance.lock().unwrap().thread = Some(handle);
}
/// Hashes any `Hash` value with the standard library's DefaultHasher.
fn hashme<T>(obj: &T) -> u64
where
    T: Hash,
{
    let mut hasher = DefaultHasher::new();
    obj.hash(&mut hasher);
    hasher.finish()
}
/// An in-memory vector of records that a background thread periodically
/// persists to disk.
pub struct Database<A> {
    pub database_name: &'static str,
    pub data: Vec<A>,
    /// Hash of the data as it was last written to disk.
    old_hash: u64,
    /// Handle to the background saving thread.
    thread: Option<thread::JoinHandle<()>>,
    /// Set to true to ask the saving thread to exit.
    shutdown: bool,
}
impl<A: 'static> Database<A>
where
    A: Send + serde::de::DeserializeOwned + serde::ser::Serialize + Hash,
{
    /// Loads the database from disk (or starts empty) and spawns the
    /// background saving thread.
    pub fn new(db_name: &'static str) -> Arc<Mutex<Database<A>>> {
        let from_disk = read_from_file(db_name);
        let hashed = hashme(&from_disk);
        let object = Arc::new(Mutex::new(Database { database_name: db_name, data: from_disk, old_hash: hashed, thread: None, shutdown: false }));
        make_thread(&object);
        object
    }

    /// Stops the saving thread and waits for it to exit, so the final
    /// state is flushed to disk before the handle is discarded.
    pub fn drop(instance: &Arc<Mutex<Database<A>>>) {
        // Take the join handle while holding the lock, then release the
        // lock before joining so the saving thread can finish its last pass.
        let handle = {
            let mut lock = instance.lock().expect("Failed to obtain 6db lock at cleanup");
            lock.shutdown = true;
            lock.thread.take().expect("Thread did not exist at cleanup")
        };
        handle.join().expect("could not join thread at cleanup");
    }
}
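
// A minimal usage sketch, kept behind #[cfg(test)] so it does not affect the
// library itself. The `Note` record type and the "notes" database name are
// illustrative assumptions, not part of the code above; the sketch also assumes
// serde's derive feature is enabled and writes real files under ./SixthDatabase/.
#[cfg(test)]
mod example_usage {
    use super::*;

    // Any record type just needs Serialize, Deserialize, Hash, and Send.
    #[derive(serde::Serialize, serde::Deserialize, Hash)]
    struct Note {
        title: String,
        body: String,
    }

    #[test]
    fn open_mutate_and_shut_down() {
        // Loads ./SixthDatabase/notes.6db if it exists and starts the saving thread.
        let db = Database::<Note>::new("notes");

        // All access goes through the mutex; mutate `data` under the lock.
        db.lock().unwrap().data.push(Note {
            title: "hello".to_string(),
            body: "first entry".to_string(),
        });

        // Request shutdown and join; the saving thread flushes the change on its
        // next pass, so this call can block for up to one 15-second cycle.
        Database::drop(&db);
    }
}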