//! Utility functions shared by the cryptographic primitives in this crate: reading and
//! writing integers in big- and little-endian byte order, bit-count bookkeeping for
//! digests, and fixed-size input buffering.

use std;
use std::mem;
use std::num::{Int, UnsignedInt};
use std::ptr;
use std::slice::bytes::{MutableByteVector, copy_memory};
use buffer::{ReadBuffer, WriteBuffer, BufferResult};
use buffer::BufferResult::{BufferUnderflow, BufferOverflow};
use symmetriccipher::{SynchronousStreamCipher, SymmetricCipherError};
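
/// Write a u64 into the provided buffer in big-endian byte order. The buffer must be exactly 8 bytes long.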
pub fn write_u64_be(dst: &mut [u8], mut input: u64) {
    assert!(dst.len() == 8);
    input = input.to_be();
    unsafe {
        let tmp = &input as *const _ as *const u8;
        ptr::copy_nonoverlapping_memory(dst.get_unchecked_mut(0), tmp, 8);
    }
}
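
/// Write a u64 into the provided buffer in little-endian byte order. The buffer must be exactly 8 bytes long.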
pub fn write_u64_le(dst: &mut [u8], mut input: u64) {
    assert!(dst.len() == 8);
    input = input.to_le();
    unsafe {
        let tmp = &input as *const _ as *const u8;
        ptr::copy_nonoverlapping_memory(dst.get_unchecked_mut(0), tmp, 8);
    }
}
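
/// Write a slice of u64s into the provided buffer in little-endian byte order. The buffer must be
/// exactly 8 times as long as the input slice.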
pub fn write_u64v_le(dst: &mut [u8], input: &[u64]) {
    assert!(dst.len() == 8 * input.len());
    unsafe {
        let mut x: *mut u8 = dst.get_unchecked_mut(0);
        let mut y: *const u64 = input.get_unchecked(0);
        for _ in range(0, input.len()) {
            let tmp = (*y).to_le();
            ptr::copy_nonoverlapping_memory(x, &tmp as *const _ as *const u8, 8);
            x = x.offset(8);
            y = y.offset(1);
        }
    }
}
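
/// Write a u32 into the provided buffer in big-endian byte order. The buffer must be exactly 4 bytes long.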
pub fn write_u32_be(dst: &mut [u8], mut input: u32) {
    assert!(dst.len() == 4);
    input = input.to_be();
    unsafe {
        let tmp = &input as *const _ as *const u8;
        ptr::copy_nonoverlapping_memory(dst.get_unchecked_mut(0), tmp, 4);
    }
}
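
/// Write a u32 into the provided buffer in little-endian byte order. The buffer must be exactly 4 bytes long.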
pub fn write_u32_le(dst: &mut [u8], mut input: u32) {
    assert!(dst.len() == 4);
    input = input.to_le();
    unsafe {
        let tmp = &input as *const _ as *const u8;
        ptr::copy_nonoverlapping_memory(dst.get_unchecked_mut(0), tmp, 4);
    }
}
pub fn read_u64v_be(dst: &mut [u64], input: &[u8]) {
    assert!(dst.len() * 8 == input.len());
    unsafe {
        let mut x = dst.get_unchecked_mut(0) as *mut u64;
        let mut y = input.get_unchecked(0) as *const u8;
        for _ in range(0, dst.len()) {
            let mut tmp: u64 = mem::uninitialized();
            ptr::copy_nonoverlapping_memory(&mut tmp as *mut _ as *mut u8, y, 8);
            *x = Int::from_be(tmp);
            x = x.offset(1);
            y = y.offset(8);
        }
    }
}
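
/// Read a slice of bytes into a slice of u64s, interpreting the bytes as little-endian. The input
/// must be exactly 8 times as long as the destination slice.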
pub fn read_u64v_le(dst: &mut [u64], input: &[u8]) {
    assert!(dst.len() * 8 == input.len());
    unsafe {
        let mut x = dst.get_unchecked_mut(0) as *mut u64;
        let mut y = input.get_unchecked(0) as *const u8;
        for _ in range(0, dst.len()) {
            let mut tmp: u64 = mem::uninitialized();
            ptr::copy_nonoverlapping_memory(&mut tmp as *mut _ as *mut u8, y, 8);
            *x = Int::from_le(tmp);
            x = x.offset(1);
            y = y.offset(8);
        }
    }
}
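
/// Read a slice of bytes into a slice of u32s, interpreting the bytes as big-endian. The input
/// must be exactly 4 times as long as the destination slice.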
pub fn read_u32v_be(dst: &mut [u32], input: &[u8]) {
    assert!(dst.len() * 4 == input.len());
    unsafe {
        let mut x = dst.get_unchecked_mut(0) as *mut u32;
        let mut y = input.get_unchecked(0) as *const u8;
        for _ in range(0, dst.len()) {
            let mut tmp: u32 = mem::uninitialized();
            ptr::copy_nonoverlapping_memory(&mut tmp as *mut _ as *mut u8, y, 4);
            *x = Int::from_be(tmp);
            x = x.offset(1);
            y = y.offset(4);
        }
    }
}
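
/// Read a slice of bytes into a slice of u32s, interpreting the bytes as little-endian. The input
/// must be exactly 4 times as long as the destination slice.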
pub fn read_u32v_le(dst: &mut [u32], input: &[u8]) {
    assert!(dst.len() * 4 == input.len());
    unsafe {
        let mut x = dst.get_unchecked_mut(0) as *mut u32;
        let mut y = input.get_unchecked(0) as *const u8;
        for _ in range(0, dst.len()) {
            let mut tmp: u32 = mem::uninitialized();
            ptr::copy_nonoverlapping_memory(&mut tmp as *mut _ as *mut u8, y, 4);
            *x = Int::from_le(tmp);
            x = x.offset(1);
            y = y.offset(4);
        }
    }
}
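
/// Read a single u32 from the provided buffer in little-endian byte order. The buffer must be exactly 4 bytes long.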
pub fn read_u32_le(input: &[u8]) -> u32 {
    assert!(input.len() == 4);
    unsafe {
        let mut tmp: u32 = mem::uninitialized();
        ptr::copy_nonoverlapping_memory(&mut tmp as *mut _ as *mut u8, input.get_unchecked(0), 4);
        Int::from_le(tmp)
    }
}
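
/// Read a single u32 from the provided buffer in big-endian byte order. The buffer must be exactly 4 bytes long.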
pub fn read_u32_be(input: &[u8]) -> u32 {
    assert!(input.len() == 4);
    unsafe {
        let mut tmp: u32 = mem::uninitialized();
        ptr::copy_nonoverlapping_memory(&mut tmp as *mut _ as *mut u8, input.get_unchecked(0), 4);
        Int::from_be(tmp)
    }
}
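
/// Process as much data as possible from `input` into `output` using the given
/// SynchronousStreamCipher. Returns BufferUnderflow once the input is exhausted, or
/// BufferOverflow if the output buffer fills up first.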
pub fn symm_enc_or_dec<S: SynchronousStreamCipher, R: ReadBuffer, W: WriteBuffer>(
        c: &mut S,
        input: &mut R,
        output: &mut W) -> Result<BufferResult, SymmetricCipherError> {
    let count = std::cmp::min(input.remaining(), output.remaining());
    c.process(input.take_next(count), output.take_next(count));
    if input.is_empty() {
        Ok(BufferUnderflow)
    } else {
        Ok(BufferOverflow)
    }
}
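
/// Convert a value in bytes to the equivalent value in bits, returned as a (high, low)
/// pair of words so that the multiplication by 8 cannot silently overflow.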
trait ToBits {
    fn to_bits(self) -> (Self, Self);
}

impl ToBits for u64 {
    fn to_bits(self) -> (u64, u64) {
        (self >> 61, self << 3)
    }
}
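
/// Add the number of bits corresponding to `bytes` bytes to the bit count `bits`,
/// panicking if the result would overflow.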
pub fn add_bytes_to_bits<T: Int + ToBits>(bits: T, bytes: T) -> T {
    let (new_high_bits, new_low_bits) = bytes.to_bits();
    if new_high_bits > Int::zero() {
        panic!("Numeric overflow occurred.")
    }
    match bits.checked_add(new_low_bits) {
        Some(x) => return x,
        None => panic!("Numeric overflow occurred.")
    }
}
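
/// Add the number of bits corresponding to `bytes` bytes to a bit count stored as a
/// (high, low) tuple, carrying any overflow of the low word into the high word and
/// panicking only if the high word itself overflows. For example, adding 1 byte
/// (8 bits) to (5, Int::max_value()) yields (6, 7).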
pub fn add_bytes_to_bits_tuple
        <T: Int + UnsignedInt + ToBits>
        (bits: (T, T), bytes: T) -> (T, T) {
    let (new_high_bits, new_low_bits) = bytes.to_bits();
    let (hi, low) = bits;

    // Add the low order value; if it doesn't overflow, just add the high order values.
    // If it does overflow, carry one into the high order value before adding.
    match low.checked_add(new_low_bits) {
        Some(x) => {
            if new_high_bits == Int::zero() {
                return (hi, x);
            } else {
                match hi.checked_add(new_high_bits) {
                    Some(y) => return (y, x),
                    None => panic!("Numeric overflow occurred.")
                }
            }
        },
        None => {
            let one: T = Int::one();
            let z = match new_high_bits.checked_add(one) {
                Some(w) => w,
                None => panic!("Numeric overflow occurred.")
            };
            match hi.checked_add(z) {
                // The low order addition is intentionally allowed to wrap here; the
                // carry was already added into the high order value above.
                Some(y) => return (y, low + new_low_bits),
                None => panic!("Numeric overflow occurred.")
            }
        }
    }
}
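
/// A fixed-size buffer that accumulates input and hands off complete blocks to a
/// callback, as needed by block-oriented algorithms such as the digest implementations
/// in this crate.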
pub trait FixedBuffer {
    /// Input a slice of bytes; whenever the buffer fills up, pass the full block to `func`.
    fn input<F: FnMut(&[u8])>(&mut self, input: &[u8], func: F);
    /// Reset the buffer position.
    fn reset(&mut self);
    /// Zero the buffer from the current position up to `idx` and move the position to `idx`.
    fn zero_until(&mut self, idx: usize);
    /// Get a mutable slice of the next `len` bytes and advance the position past them.
    fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8];
    /// Get the full buffer; it must already be full. Resets the position.
    fn full_buffer<'s>(&'s mut self) -> &'s [u8];
    /// Get the bytes written so far and reset the position.
    fn current_buffer<'s>(&'s mut self) -> &'s [u8];
    /// The current write position within the buffer.
    fn position(&self) -> usize;
    /// How many bytes remain until the buffer is full.
    fn remaining(&self) -> usize;
    /// The total size of the buffer.
    fn size(&self) -> usize;
}
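
// Generate a FixedBuffer implementation for a struct that has a `buffer` byte array of
// the given size and a `buffer_idx` field tracking the current position.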
macro_rules! impl_fixed_buffer( ($name:ident, $size:expr) => (
    impl FixedBuffer for $name {
        fn input<F: FnMut(&[u8])>(&mut self, input: &[u8], mut func: F) {
            let mut i = 0;
            let size = $size;

            // If there is already data in the buffer, try to fill it up. If it fills
            // completely, process that block and then consume the rest of the input
            // directly.
            if self.buffer_idx != 0 {
                let buffer_remaining = size - self.buffer_idx;
                if input.len() >= buffer_remaining {
                    copy_memory(
                        &mut self.buffer[self.buffer_idx..size],
                        &input[..buffer_remaining]);
                    self.buffer_idx = 0;
                    func(&self.buffer);
                    i += buffer_remaining;
                } else {
                    // Not enough input to fill the buffer; just stash it and return.
                    copy_memory(
                        &mut self.buffer[self.buffer_idx..self.buffer_idx + input.len()],
                        input);
                    self.buffer_idx += input.len();
                    return;
                }
            }

            // Process any full blocks straight from the input without buffering them.
            while input.len() - i >= size {
                func(&input[i..i + size]);
                i += size;
            }

            // Copy any remaining partial block into the buffer for next time.
            let input_remaining = input.len() - i;
            copy_memory(
                &mut self.buffer[0..input_remaining],
                &input[i..]);
            self.buffer_idx += input_remaining;
        }

        fn reset(&mut self) {
            self.buffer_idx = 0;
        }

        fn zero_until(&mut self, idx: usize) {
            assert!(idx >= self.buffer_idx);
            self.buffer[self.buffer_idx..idx].set_memory(0);
            self.buffer_idx = idx;
        }

        fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8] {
            self.buffer_idx += len;
            &mut self.buffer[self.buffer_idx - len..self.buffer_idx]
        }

        fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
            assert!(self.buffer_idx == $size);
            self.buffer_idx = 0;
            &self.buffer[..$size]
        }

        fn current_buffer<'s>(&'s mut self) -> &'s [u8] {
            let tmp = self.buffer_idx;
            self.buffer_idx = 0;
            &self.buffer[..tmp]
        }

        fn position(&self) -> usize { self.buffer_idx }

        fn remaining(&self) -> usize { $size - self.buffer_idx }

        fn size(&self) -> usize { $size }
    }
));
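
/// A fixed-size buffer of 64 bytes, useful for algorithms that operate on 512-bit blocks.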
#[derive(Copy)]
pub struct FixedBuffer64 {
    buffer: [u8; 64],
    buffer_idx: usize,
}

impl FixedBuffer64 {
    pub fn new() -> FixedBuffer64 {
        FixedBuffer64 {
            buffer: [0u8; 64],
            buffer_idx: 0
        }
    }
}
impl_fixed_buffer!(FixedBuffer64, 64);
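
/// A fixed-size buffer of 128 bytes, useful for algorithms that operate on 1024-bit blocks.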
pub struct FixedBuffer128 {
    buffer: [u8; 128],
    buffer_idx: usize,
}

impl FixedBuffer128 {
    pub fn new() -> FixedBuffer128 {
        FixedBuffer128 {
            buffer: [0u8; 128],
            buffer_idx: 0
        }
    }
}
impl_fixed_buffer!(FixedBuffer128, 128);
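
/// Standard digest padding: writes a 0x80 byte followed by zeros, reserving the final
/// `rem` bytes of the last block (typically for the message length). If the current
/// block doesn't have room, it is zero-filled and flushed to the callback first.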
pub trait StandardPadding {
    fn standard_padding<F: FnMut(&[u8])>(&mut self, rem: usize, func: F);
}

impl <T: FixedBuffer> StandardPadding for T {
    fn standard_padding<F: FnMut(&[u8])>(&mut self, rem: usize, mut func: F) {
        let size = self.size();
        self.next(1)[0] = 128;
        if self.remaining() < rem {
            self.zero_until(size);
            func(self.full_buffer());
        }
        self.zero_until(size - rem);
    }
}
#[cfg(test)]
pub mod test {
    use std::iter::repeat;
    use std::num::Int;
    use std::rand::IsaacRng;
    use std::rand::distributions::{IndependentSample, Range};

    use cryptoutil::{add_bytes_to_bits, add_bytes_to_bits_tuple};
    use digest::Digest;
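
    /// Feed one million 'a' bytes to the digest in randomly sized chunks and compare the
    /// hex result against `expected`.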
    pub fn test_digest_1million_random<D: Digest>(digest: &mut D, blocksize: usize, expected: &str) {
        let total_size = 1000000;
        let buffer: Vec<u8> = repeat('a' as u8).take(blocksize * 2).collect();
        let mut rng = IsaacRng::new_unseeded();
        let range = Range::new(0, 2 * blocksize + 1);
        let mut count = 0;

        digest.reset();

        while count < total_size {
            let next = range.ind_sample(&mut rng);
            let remaining = total_size - count;
            let size = if next > remaining { remaining } else { next };
            digest.input(&buffer[..size]);
            count += size;
        }

        let result_str = digest.result_str();

        assert!(expected == &result_str[]);
    }
    #[test]
    fn test_add_bytes_to_bits_ok() {
        assert!(add_bytes_to_bits::<u64>(100, 10) == 180);
    }

    #[test]
    #[should_fail]
    fn test_add_bytes_to_bits_overflow() {
        add_bytes_to_bits::<u64>(Int::max_value(), 1);
    }

    #[test]
    fn test_add_bytes_to_bits_tuple_ok() {
        assert!(add_bytes_to_bits_tuple::<u64>((5, 100), 10) == (5, 180));
    }

    #[test]
    fn test_add_bytes_to_bits_tuple_ok2() {
        assert!(add_bytes_to_bits_tuple::<u64>((5, Int::max_value()), 1) == (6, 7));
    }

    #[test]
    fn test_add_bytes_to_bits_tuple_ok3() {
        assert!(add_bytes_to_bits_tuple::<u64>((5, 0), 0x4000000000000001) == (7, 8));
    }

    #[test]
    #[should_fail]
    fn test_add_bytes_to_bits_tuple_overflow() {
        add_bytes_to_bits_tuple::<u64>((Int::max_value(), Int::max_value()), 1);
    }

    #[test]
    #[should_fail]
    fn test_add_bytes_to_bits_tuple_overflow2() {
        let value: u64 = Int::max_value();
        add_bytes_to_bits_tuple::<u64>((value - 1, 0), 0x8000000000000000);
    }
}