#![doc(
html_logo_url = "https://raw.githubusercontent.com/maidsafe/QA/master/Images/maidsafe_logo.png",
html_favicon_url = "https://maidsafe.net/img/favicon.ico",
test(attr(forbid(warnings)))
)]
#![forbid(
arithmetic_overflow,
mutable_transmutes,
no_mangle_const_items,
unknown_crate_types
)]
#![deny(
bad_style,
deprecated,
improper_ctypes,
missing_docs,
non_shorthand_field_patterns,
overflowing_literals,
stable_features,
unconditional_recursion,
unknown_lints,
unsafe_code,
unused,
unused_allocation,
unused_attributes,
unused_comparisons,
unused_features,
unused_parens,
while_true,
warnings
)]
#![warn(
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
unused_results
)]
#![allow(
box_pointers,
missing_copy_implementations,
missing_debug_implementations,
variant_size_differences,
non_camel_case_types
)]
#![allow(clippy::cast_lossless, clippy::decimal_literal_representation)]
mod chunk;
mod data_map;
mod decrypt;
mod encrypt;
mod encryption;
mod error;
pub mod test_helpers;
#[cfg(test)]
mod tests;
use self::encryption::{Iv, Key, Pad, IV_SIZE, KEY_SIZE, PAD_SIZE};
pub use self::{
data_map::{ChunkInfo, DataMap},
error::{Error, Result},
};
use bytes::Bytes;
use chunk::batch_positions;
use decrypt::decrypt_chunk;
use encrypt::encrypt_chunk;
use itertools::Itertools;
use std::{
collections::BTreeMap,
fs::{File, OpenOptions},
io::{Read, Seek, SeekFrom, Write},
ops::Range,
path::{Path, PathBuf},
};
use tempfile::{tempdir, TempDir};
use xor_name::XorName;
pub use bytes;
pub use xor_name;
/// The minimum size (in bytes) of data that can be self-encrypted:
/// three chunks of the minimum chunk size.
pub const MIN_ENCRYPTABLE_BYTES: usize = 3 * MIN_CHUNK_SIZE;
/// The maximum size (in bytes) of an individual chunk: 512 KiB.
pub const MAX_CHUNK_SIZE: usize = 512 * 1024;
/// The minimum size (in bytes) of an individual chunk: 1 KiB.
pub const MIN_CHUNK_SIZE: usize = 1024;
/// Compression quality setting exposed by the crate.
// NOTE(review): not referenced in this file; presumably consumed by the
// encrypt/decrypt modules' compression step — confirm there.
pub const COMPRESSION_QUALITY: i32 = 6;
/// One encrypted chunk of a larger piece of data, tagged with its position
/// in the sequence of chunks produced from the source.
#[derive(Clone)]
pub struct EncryptedChunk {
    /// Zero-based index of this chunk within the original data.
    pub index: usize,
    /// The encrypted payload bytes of the chunk.
    pub content: Bytes,
}
/// Encrypts a file chunk by chunk, reading each chunk from disk on demand
/// instead of loading the whole file into memory.
#[derive(Clone)]
pub struct StreamSelfEncryptor {
    /// Path of the source file being encrypted.
    file_path: Box<PathBuf>,
    /// `(start, end)` byte offsets of each chunk within the source file.
    batch_positions: Vec<(usize, usize)>,
    /// Index of the next chunk to encrypt.
    chunk_index: usize,
    /// `ChunkInfo` entries collected so far; becomes the `DataMap` when done.
    data_map: Vec<ChunkInfo>,
    /// Cache of source-content hashes keyed by chunk index. Needed because a
    /// chunk's keys derive from the hashes of two neighbouring chunks.
    src_hashes: BTreeMap<usize, XorName>,
}
impl StreamSelfEncryptor {
    /// Creates an encryptor that streams the file at `file_path`.
    ///
    /// The file is opened once here only to determine its size; chunk reads
    /// happen lazily, one at a time.
    pub fn encrypt_from_file(file_path: Box<PathBuf>) -> Result<Self> {
        let file_size = File::open(&*file_path)?.metadata()?.len() as usize;
        Ok(Self {
            batch_positions: batch_positions(file_size),
            file_path,
            chunk_index: 0,
            data_map: Vec::new(),
            src_hashes: BTreeMap::new(),
        })
    }
    /// Encrypts the next chunk in sequence.
    ///
    /// Returns `(Some(chunk), None)` while chunks remain, and
    /// `(None, Some(data_map))` once every chunk has been produced.
    pub fn next_encryption(&mut self) -> Result<(Option<EncryptedChunk>, Option<DataMap>)> {
        let index = self.chunk_index;
        if index >= self.batch_positions.len() {
            // All chunks done: hand back the completed data map.
            return Ok((None, Some(DataMap::new(self.data_map.clone()))));
        }
        let (src_hash, plain_content) = self.read_chunk(index)?;
        // Keys are derived while `chunk_index` still points at this chunk.
        let keys = self.get_pad_key_and_iv(src_hash)?;
        let content = encrypt_chunk(plain_content, keys)?;
        let dst_hash = XorName::from_content(content.as_ref());
        self.chunk_index += 1;
        let (start, end) = self.batch_positions[index];
        self.data_map.push(ChunkInfo {
            index,
            dst_hash,
            src_hash,
            src_size: end - start,
        });
        Ok((Some(EncryptedChunk { index, content }), None))
    }
    /// Reads the bytes of chunk `chunk_index` from the source file and
    /// caches (and returns) the hash of its plaintext content.
    fn read_chunk(&mut self, chunk_index: usize) -> Result<(XorName, Bytes)> {
        let (start, end) = self.batch_positions[chunk_index];
        let mut file = File::open(&*self.file_path)?;
        let _ = file.seek(SeekFrom::Start(start as u64))?;
        let mut buffer = vec![0u8; end - start];
        file.read_exact(&mut buffer)?;
        let content = Bytes::from(buffer);
        let src_hash = XorName::from_content(content.as_ref());
        let _ = self.src_hashes.insert(chunk_index, src_hash);
        Ok((src_hash, content))
    }
    /// Derives the pad, key and IV for the chunk currently being encrypted,
    /// combining its own source hash with those of two neighbouring chunks.
    fn get_pad_key_and_iv(&mut self, src_hash: XorName) -> Result<(Pad, Key, Iv)> {
        let (n_1, n_2) = get_n_1_n_2(self.chunk_index, self.batch_positions.len());
        let hash_n_1 = self.get_src_chunk_name(n_1)?;
        let hash_n_2 = self.get_src_chunk_name(n_2)?;
        Ok(get_pki(&src_hash, &hash_n_1, &hash_n_2))
    }
    /// Returns the source-content hash of chunk `index`, reading the chunk
    /// from disk if it has not been hashed yet.
    fn get_src_chunk_name(&mut self, index: usize) -> Result<XorName> {
        match self.src_hashes.get(&index) {
            Some(hash) => Ok(*hash),
            None => self.read_chunk(index).map(|(hash, _content)| hash),
        }
    }
}
/// Decrypts chunks into an output file as they arrive, buffering any chunk
/// that arrives out of order until its turn comes.
pub struct StreamSelfDecryptor {
    /// Destination path for the decrypted output.
    file_path: Box<PathBuf>,
    /// Index of the next chunk expected for in-order decryption.
    chunk_index: usize,
    /// Source-content hashes from the data map (keying material).
    src_hashes: Vec<XorName>,
    /// Out-of-order chunks: maps chunk index to the name of the temp file
    /// holding its encrypted content.
    encrypted_chunks: BTreeMap<usize, XorName>,
    /// Temp directory for buffered out-of-order chunks; removed on drop.
    temp_dir: TempDir,
}
impl StreamSelfDecryptor {
    /// Creates a decryptor that writes decrypted output to `file_path`,
    /// taking the per-chunk keying material from `data_map`.
    pub fn decrypt_to_file(file_path: Box<PathBuf>, data_map: &DataMap) -> Result<Self> {
        let temp_dir = tempdir()?;
        let src_hashes = extract_hashes(data_map);
        Ok(StreamSelfDecryptor {
            file_path,
            chunk_index: 0,
            src_hashes,
            encrypted_chunks: BTreeMap::new(),
            temp_dir,
        })
    }
    /// Feeds in the next encrypted chunk.
    ///
    /// If it is the currently expected chunk it is decrypted and appended to
    /// the output file (along with any buffered successors); otherwise it is
    /// spilled to a temp file until its turn. Returns `Ok(true)` once every
    /// chunk has been decrypted and written.
    pub fn next_encrypted(&mut self, encrypted_chunk: EncryptedChunk) -> Result<bool> {
        if encrypted_chunk.index == self.chunk_index {
            let decrypted_content =
                decrypt_chunk(self.chunk_index, encrypted_chunk.content, &self.src_hashes)?;
            self.append_to_file(&decrypted_content)?;
            self.chunk_index += 1;
            self.drain_unprocessed()?;
            if self.chunk_index == self.src_hashes.len() {
                return Ok(true);
            }
        } else {
            // Out of order: buffer the encrypted bytes on disk, named by
            // their content hash, and remember which index they belong to.
            let chunk_name = XorName::from_content(&encrypted_chunk.content);
            let file_path = self.temp_dir.path().join(hex::encode(chunk_name));
            let mut output_file = File::create(file_path)?;
            output_file.write_all(&encrypted_chunk.content)?;
            let _ = self
                .encrypted_chunks
                .insert(encrypted_chunk.index, chunk_name);
        }
        Ok(false)
    }
    /// Appends `content` to the output file, creating it if necessary.
    fn append_to_file(&self, content: &Bytes) -> std::io::Result<()> {
        let mut file = OpenOptions::new()
            .write(true)
            .append(true)
            .create(true)
            .open(&*self.file_path)?;
        file.write_all(content)?;
        Ok(())
    }
    /// Decrypts and appends any buffered chunks that are now in sequence.
    ///
    /// Fix: previously this used `get` and never removed processed entries
    /// from `encrypted_chunks`, nor deleted the spilled temp files, leaking
    /// bookkeeping and disk space for the decryptor's lifetime. `remove`
    /// releases each entry as it is consumed, and the temp file is deleted.
    fn drain_unprocessed(&mut self) -> Result<()> {
        while let Some(chunk_name) = self.encrypted_chunks.remove(&self.chunk_index) {
            let file_path = self.temp_dir.path().join(hex::encode(chunk_name));
            let mut chunk_file = File::open(&file_path)?;
            let mut chunk_data = Vec::new();
            let _ = chunk_file.read_to_end(&mut chunk_data)?;
            let decrypted_content =
                decrypt_chunk(self.chunk_index, chunk_data.into(), &self.src_hashes)?;
            self.append_to_file(&decrypted_content)?;
            // Best-effort cleanup; the TempDir removes leftovers on drop
            // anyway, so a failure here is not fatal.
            let _ = std::fs::remove_file(&file_path);
            self.chunk_index += 1;
        }
        Ok(())
    }
}
/// Reads the file at `file_path`, self-encrypts it, and writes every
/// encrypted chunk into `output_dir`, named by the hex of its content hash.
/// Returns the data map together with the chunk names in chunk order.
pub fn encrypt_from_file(file_path: &Path, output_dir: &Path) -> Result<(DataMap, Vec<XorName>)> {
    let mut bytes = Vec::new();
    let _ = File::open(file_path)?.read_to_end(&mut bytes)?;
    let (data_map, encrypted_chunks) = encrypt(Bytes::from(bytes))?;
    let mut chunk_names = Vec::with_capacity(encrypted_chunks.len());
    for chunk in encrypted_chunks {
        let chunk_name = XorName::from_content(&chunk.content);
        chunk_names.push(chunk_name);
        let chunk_path = output_dir.join(hex::encode(chunk_name));
        File::create(chunk_path)?.write_all(&chunk.content)?;
    }
    Ok((data_map, chunk_names))
}
/// Loads each chunk listed in `data_map` from `chunk_dir` (stored under the
/// hex of its destination hash), decrypts the full set, and writes the
/// result to `output_filepath`.
pub fn decrypt_from_chunk_files(
    chunk_dir: &Path,
    data_map: &DataMap,
    output_filepath: &Path,
) -> Result<(), Box<dyn std::error::Error>> {
    let mut encrypted_chunks = Vec::new();
    for chunk_info in data_map.infos() {
        let chunk_path = chunk_dir.join(hex::encode(chunk_info.dst_hash));
        let mut chunk_data = Vec::new();
        let _ = File::open(chunk_path)?.read_to_end(&mut chunk_data)?;
        encrypted_chunks.push(EncryptedChunk {
            index: chunk_info.index,
            content: Bytes::from(chunk_data),
        });
    }
    let decrypted_content = decrypt_full_set(data_map, &encrypted_chunks)?;
    File::create(output_filepath)?.write_all(&decrypted_content)?;
    Ok(())
}
/// Self-encrypts `bytes`, returning the data map and the encrypted chunks.
///
/// # Errors
/// Fails if the input is shorter than [`MIN_ENCRYPTABLE_BYTES`], or if the
/// encryption step produced fewer chunks than expected.
pub fn encrypt(bytes: Bytes) -> Result<(DataMap, Vec<EncryptedChunk>)> {
    if bytes.len() < MIN_ENCRYPTABLE_BYTES {
        return Err(Error::Generic(format!(
            "Too small for self-encryption! Required size at least {}",
            MIN_ENCRYPTABLE_BYTES
        )));
    }
    let (num_chunks, batches) = chunk::batch_chunks(bytes);
    let (data_map, encrypted_chunks) = encrypt::encrypt(batches);
    if encrypted_chunks.len() < num_chunks {
        return Err(Error::Encryption);
    }
    Ok((data_map, encrypted_chunks))
}
/// Decrypts a complete set of chunks back into the original bytes.
/// The chunks may be supplied in any order; they are sorted by index here.
pub fn decrypt_full_set(data_map: &DataMap, chunks: &[EncryptedChunk]) -> Result<Bytes> {
    let src_hashes = extract_hashes(data_map);
    let mut sorted_chunks: Vec<EncryptedChunk> = chunks.to_vec();
    sorted_chunks.sort_by_key(|c| c.index);
    decrypt::decrypt(src_hashes, sorted_chunks)
}
/// Decrypts the full set of chunks, then returns up to `len` bytes starting
/// at `relative_pos` within the decrypted data. Returns empty bytes if
/// `relative_pos` is past the end of the data.
pub fn decrypt_range(
    data_map: &DataMap,
    chunks: &[EncryptedChunk],
    relative_pos: usize,
    len: usize,
) -> Result<Bytes> {
    let src_hashes = extract_hashes(data_map);
    let ordered = chunks
        .iter()
        .cloned()
        .sorted_by_key(|c| c.index)
        .collect_vec();
    let mut bytes = decrypt::decrypt(src_hashes, ordered)?;
    if bytes.len() <= relative_pos {
        return Ok(Bytes::new());
    }
    // Drop everything before the requested position, then cap at `len`.
    let _ = bytes.split_to(relative_pos);
    bytes.truncate(len);
    Ok(bytes)
}
/// XORs `data` against `pad`, cycling the pad as needed. Applying the same
/// pad a second time restores the original data.
pub(crate) fn xor(data: Bytes, &Pad(pad): &Pad) -> Bytes {
    let mixed: Vec<u8> = pad
        .iter()
        .cycle()
        .zip(data.iter())
        .map(|(&p, &d)| d ^ p)
        .collect();
    Bytes::from(mixed)
}
/// Result of a seek computation: which chunks overlap a requested byte
/// range, and where the range starts within the first of them.
pub struct SeekInfo {
    /// Range of chunk indices that must be decrypted to cover the request.
    pub index_range: Range<usize>,
    /// Byte offset of the requested position inside the first chunk.
    pub relative_pos: usize,
}
/// Computes which chunks cover `len` bytes starting at `pos` within a file
/// of `file_size` bytes, plus the offset of `pos` inside the first chunk.
pub fn seek_info(file_size: usize, pos: usize, len: usize) -> SeekInfo {
    let (start_index, end_index) = overlapped_chunks(file_size, pos, len);
    // Fix: files below the encryptable minimum have chunk size 0, and the
    // original unconditional `pos % chunk_size` panicked with a division by
    // zero. Guard the modulo and fall back to `pos` itself.
    let chunk_size = get_chunk_size(file_size, start_index);
    let relative_pos = if chunk_size == 0 {
        pos
    } else {
        pos % chunk_size
    };
    SeekInfo {
        index_range: start_index..end_index,
        relative_pos,
    }
}
/// Returns the half-open chunk-index range `[start, end)` touched by
/// reading `len` bytes at offset `pos`. Degenerates to `(0, 0)` for
/// unencryptable sizes, out-of-range positions, or zero-length reads.
fn overlapped_chunks(file_size: usize, pos: usize, len: usize) -> (usize, usize) {
    if file_size < 3 * MIN_CHUNK_SIZE || pos >= file_size || len == 0 {
        return (0, 0);
    }
    // Saturate at the file size if `pos + len` would overflow.
    let end = pos.checked_add(len).unwrap_or(file_size);
    (
        get_chunk_index(file_size, pos),
        get_chunk_index(file_size, end),
    )
}
/// Collects the source-content hash of every chunk listed in the data map.
fn extract_hashes(data_map: &DataMap) -> Vec<XorName> {
    let mut hashes = Vec::new();
    for info in data_map.infos() {
        hashes.push(info.src_hash);
    }
    hashes
}
/// Derives the pad/key/IV triple for `chunk_index` from the complete list
/// of source-content hashes.
fn get_pad_key_and_iv(chunk_index: usize, chunk_hashes: &[XorName]) -> (Pad, Key, Iv) {
    let (n_1, n_2) = get_n_1_n_2(chunk_index, chunk_hashes.len());
    get_pki(
        &chunk_hashes[chunk_index],
        &chunk_hashes[n_1],
        &chunk_hashes[n_2],
    )
}
/// Picks the two neighbour indices whose hashes feed into a chunk's keys.
/// The first two chunks wrap around to the end of the list; every other
/// chunk uses its two immediate predecessors. Callers guarantee
/// `total_num_chunks >= 3`, so the subtractions cannot underflow.
fn get_n_1_n_2(chunk_index: usize, total_num_chunks: usize) -> (usize, usize) {
    if chunk_index == 0 {
        (total_num_chunks - 1, total_num_chunks - 2)
    } else if chunk_index == 1 {
        (0, total_num_chunks - 1)
    } else {
        (chunk_index - 1, chunk_index - 2)
    }
}
/// Builds the `(pad, key, iv)` triple from three source hashes: the pad is
/// filled from `src_hash` followed by `n_2_src_hash`, while `n_1_src_hash`
/// supplies first the key bytes and then the IV bytes.
fn get_pki(src_hash: &XorName, n_1_src_hash: &XorName, n_2_src_hash: &XorName) -> (Pad, Key, Iv) {
    let mut pad = [0u8; PAD_SIZE];
    let mut key = [0u8; KEY_SIZE];
    let mut iv = [0u8; IV_SIZE];
    let pad_source = src_hash.iter().chain(n_2_src_hash.iter());
    for (slot, byte) in pad.iter_mut().zip(pad_source) {
        *slot = *byte;
    }
    let key_then_iv = key.iter_mut().chain(iv.iter_mut());
    for (slot, byte) in key_then_iv.zip(n_1_src_hash.iter()) {
        *slot = *byte;
    }
    (Pad(pad), Key(key), Iv(iv))
}
/// Number of chunks a file of `file_size` bytes splits into: 0 if too small
/// to self-encrypt, exactly 3 up to three max-size chunks, then one chunk
/// per `MAX_CHUNK_SIZE` (rounded up).
fn get_num_chunks(file_size: usize) -> usize {
    if file_size < 3 * MIN_CHUNK_SIZE {
        0
    } else if file_size < 3 * MAX_CHUNK_SIZE {
        3
    } else {
        // Ceiling division by MAX_CHUNK_SIZE.
        let full = file_size / MAX_CHUNK_SIZE;
        if file_size % MAX_CHUNK_SIZE == 0 {
            full
        } else {
            full + 1
        }
    }
}
/// Size in bytes of chunk `chunk_index` for a file of `file_size` bytes.
///
/// Small files (under `3 * MAX_CHUNK_SIZE`) split into exactly three
/// near-equal chunks. Larger files use `MAX_CHUNK_SIZE` chunks, with any
/// tail remainder redistributed between the last two chunks so that no
/// chunk ends up smaller than `MIN_CHUNK_SIZE`.
fn get_chunk_size(file_size: usize, chunk_index: usize) -> usize {
    if file_size < 3 * MIN_CHUNK_SIZE {
        // Too small to self-encrypt: no chunks at all.
        return 0;
    }
    if file_size < 3 * MAX_CHUNK_SIZE {
        // Three chunks: two of floor(size/3), the last taking the rest.
        if chunk_index < 2 {
            return file_size / 3;
        } else {
            return file_size - (2 * (file_size / 3));
        }
    }
    let total_chunks = get_num_chunks(file_size);
    if chunk_index < total_chunks - 2 {
        // Every chunk before the last two is full-sized.
        return MAX_CHUNK_SIZE;
    }
    let remainder = file_size % MAX_CHUNK_SIZE;
    let penultimate = (total_chunks - 2) == chunk_index;
    if remainder == 0 {
        // File divides evenly: the last two chunks are full-sized too.
        return MAX_CHUNK_SIZE;
    }
    if remainder < MIN_CHUNK_SIZE {
        // The tail is too small to stand alone: the penultimate chunk gives
        // up MIN_CHUNK_SIZE bytes so the final chunk reaches the minimum.
        if penultimate {
            MAX_CHUNK_SIZE - MIN_CHUNK_SIZE
        } else {
            MIN_CHUNK_SIZE + remainder
        }
    } else if penultimate {
        MAX_CHUNK_SIZE
    } else {
        // Final chunk simply holds the remainder.
        remainder
    }
}
/// The `[start, end)` byte range occupied by chunk `chunk_index`, or
/// `(0, 0)` when the file is too small to have chunks.
fn get_start_end_positions(file_size: usize, chunk_index: usize) -> (usize, usize) {
    if get_num_chunks(file_size) == 0 {
        return (0, 0);
    }
    let start = get_start_position(file_size, chunk_index);
    let end = start + get_chunk_size(file_size, chunk_index);
    (start, end)
}
/// Byte offset at which chunk `chunk_index` begins within the file.
fn get_start_position(file_size: usize, chunk_index: usize) -> usize {
    let total_chunks = get_num_chunks(file_size);
    if total_chunks == 0 {
        return 0;
    }
    let first_chunk_size = get_chunk_size(file_size, 0);
    if chunk_index == total_chunks - 1 {
        // The last chunk starts right after the (possibly resized)
        // penultimate chunk, so its start cannot use the uniform formula.
        first_chunk_size * (chunk_index - 1) + get_chunk_size(file_size, chunk_index - 1)
    } else {
        first_chunk_size * chunk_index
    }
}
/// Index of the chunk containing byte `position`, accounting for the final
/// chunk absorbing a too-small tail remainder.
fn get_chunk_index(file_size: usize, position: usize) -> usize {
    let num_chunks = get_num_chunks(file_size);
    if num_chunks == 0 {
        return 0;
    }
    let chunk_size = get_chunk_size(file_size, 0);
    let remainder = file_size % chunk_size;
    // When the tail remainder was merged into an enlarged final chunk,
    // positions inside that merged region belong to the last chunk.
    if remainder != 0
        && remainder < MIN_CHUNK_SIZE
        && position >= file_size - remainder - MIN_CHUNK_SIZE
    {
        num_chunks - 1
    } else {
        usize::min(position / chunk_size, num_chunks - 1)
    }
}