use num_bigint::BigUint;
use num_format::ToFormattedString;
use num_traits::One;
use rand::{Rng, RngCore, SeedableRng, rngs::StdRng};
use std::time::Instant;
use zeck::*;
/// Fixed seed so the random-data experiments below are reproducible run to run.
const RNG_SEED: u64 = 42;
/// Entry point: exercises the zeck library's Fibonacci, Zeckendorf, and
/// compression helpers on small inputs, then reports total wall-clock time.
fn main() {
    let started = Instant::now();
    // Bit counts for small integers.
    for i in 0..20 {
        println!("Bit count for {}: {}", i, bit_count_for_number(i));
    }
    // Memoized recursive Fibonacci.
    for i in 0..20 {
        let fib = memoized_slow_fibonacci_recursive(i);
        println!("The {i}th Fibonacci number is: {}", fib);
    }
    // Memoized iterative Fibonacci over BigUint.
    for i in 0..20 {
        let fib = memoized_slow_fibonacci_biguint_iterative(i);
        println!("The bigint {i}th Fibonacci number is: {}", fib);
    }
    // Zeckendorf decompositions via the machine-integer path...
    for i in 0..20 {
        let list = memoized_zeckendorf_list_descending_for_integer(i);
        println!("Zeckendorf descending list for {}: {:?}", i, list);
    }
    // ...and via the BigUint path.
    for i in 0..20 {
        let list = memoized_zeckendorf_list_descending_for_biguint(&BigUint::from(i as u64));
        println!("Zeckendorf descending list for {}: {:?}", i, list);
    }
    // Zeckendorf list -> effective Zeckendorf list -> ascending bit form.
    for i in 0..20 {
        let zld = memoized_zeckendorf_list_descending_for_integer(i);
        println!("Zeckendorf descending list for {}: {:?}", i, zld);
        let ezld = zl_to_ezl(&zld);
        println!("Effective Zeckendorf list descending for {}: {:?}", i, ezld);
        let ezba = ezba_from_ezld(&ezld);
        println!("Effective Zeckendorf bits ascending for {}: {:?}", i, ezba);
    }
    let limit = 10u64;
    let big_fibonacci = memoized_slow_fibonacci_biguint_iterative(limit);
    println!("The {limit}th Fibonacci number is: {}", big_fibonacci);
    println!(
        "it takes {} bits to represent the {limit}th Fibonacci number",
        big_fibonacci.bits()
    );
    // Longer-running experiments, each self-reporting.
    test_phi_squared_and_all_ones_zeckendorf_ratios();
    test_all_ones_zeckendorf_bits_to_binary_bits_ratios();
    print_aozns_as_binary_bits();
    compare_aozn_binary_bit_interpretations();
    test_random_data_with_lots_of_leading_zeros();
    debug_zeck_file_format();
    println!("Time taken: {:?}", started.elapsed());
}
/// Round-trips a single u64 through padless Zeckendorf compression,
/// printing every intermediate representation along the way.
fn _test_zeckendorf_compress_and_decompress_number(number: u64) {
    println!("Number to compress: {:?}", number);
    let be_bytes = BigUint::from(number).to_bytes_be();
    println!("Number as big endian bytes: {:?}", be_bytes);
    let compressed = padless_zeckendorf_compress_be_dangerous(&be_bytes);
    println!("Compressed data: {:?}", compressed);
    let restored_bytes = padless_zeckendorf_decompress_be_dangerous(&compressed);
    println!("Decompressed data: {:?}", restored_bytes);
    let restored = BigUint::from_bytes_be(&restored_bytes);
    println!("Decompressed number: {:?}", restored);
}
/// Compresses and decompresses a file on disk, asserts the round trip is
/// lossless, and reports the compression ratio as a percentage.
fn _test_zeckendorf_compress_and_decompress_file(filename: &str) {
    println!(
        "Testing compression and decompression of file: {:?}",
        filename
    );
    let original = std::fs::read(filename).expect("Failed to read file");
    let original_size = original.len();
    println!("Data bytes size: {:?}", original_size);
    let compressed = padless_zeckendorf_compress_be_dangerous(&original);
    let compressed_size = compressed.len();
    println!("Compressed data size: {:?}", compressed_size);
    let restored = padless_zeckendorf_decompress_be_dangerous(&compressed);
    println!("Decompressed data size: {:?}", restored.len());
    // The round trip must be lossless.
    assert_eq!(original, restored);
    let ratio = compressed_size as f64 / original_size as f64;
    println!("Compression ratio was {x:0.3}%", x = ratio * 100.0);
    // Report whether compression helped or hurt, relative to the original.
    if ratio > 1.0 {
        println!(
            "Compressing this file was {x:0.3}% worse",
            x = (ratio - 1.0) * 100.0
        );
    } else {
        println!(
            "Compressing this file was {x:0.3}% better",
            x = (1.0 - ratio) * 100.0
        );
    }
}
/// Profiling harness: repeatedly runs the decompressor over the big-endian
/// byte forms of 0..1_000_000 so a flamegraph can sample it.
fn _flamegraph_zeckendorf_decompress_be() {
    for n in 0u64..1_000_000 {
        // The raw bytes are fed straight to the decompressor; this profiles
        // decompression only, not a real compress/decompress round trip.
        let bytes = BigUint::from(n).to_bytes_be();
        let out = padless_zeckendorf_decompress_be_dangerous(&bytes);
        // black_box keeps the optimizer from discarding the unused result.
        std::hint::black_box(out);
    }
}
/// Prints the binary bit count of the all-ones Zeckendorf number that has
/// 100_000 Zeckendorf bits.
fn _test_bit_count_for_all_ones_effective_zeckendorf_bits_ascending() {
    println!(
        "Bit count: {:?}",
        all_ones_zeckendorf_to_biguint(100000).bits()
    );
}
/// Returns the index and value of the first Fibonacci number whose binary
/// representation needs at least `target_bits` bits.
fn _find_fibonacci_by_bit_count(target_bits: u64) -> (u64, BigUint) {
    (0u64..)
        .find_map(|index| {
            let fibonacci = memoized_slow_fibonacci_biguint_iterative(index);
            (fibonacci.bits() >= target_bits).then(|| (index, (*fibonacci).clone()))
        })
        .expect("Fibonacci bit counts grow without bound, so a match always exists")
}
/// Sweeps target bit counts from 500 to 1500 in steps of 100, reporting
/// which Fibonacci index first reaches each count and total sweep time.
fn _test_find_fibonacci_by_bit_count() {
    let sweep_start = Instant::now();
    for i in (500..=1500).step_by(100) {
        let (index, fibonacci) = _find_fibonacci_by_bit_count(i);
        println!(
            "The index of the Fibonacci number that has at least {i} bits is: {:?}, at bit count: {:?}",
            index,
            fibonacci.bits()
        );
        println!(
            "The Fibonacci number that has at least {i} bits is: {:?}",
            fibonacci
        );
    }
    println!(
        "Time taken to find Fibonacci numbers by bit count: {:?}",
        sweep_start.elapsed()
    );
}
/// Prints the first 20 Fibonacci numbers using the non-memoized BigUint
/// iterative implementation.
fn _test_slow_fibonacci_bigint_iterative() {
    println!("Testing slow Fibonacci bigint iterative function");
    for i in 0..20 {
        println!(
            "The {i}th Fibonacci number is: {}",
            slow_fibonacci_biguint_iterative(i)
        );
    }
}
/// Times one non-memoized BigUint Fibonacci computation at index `fi`,
/// printing the index with thousands separators.
fn _test_slow_fibonacci_bigint_iterative_large(fi: u64) {
    // Format the index once and reuse it in both messages.
    let formatted_index = fi.to_formatted_string(&num_format::Locale::en);
    println!(
        "Testing slow Fibonacci bigint iterative function for index: {}",
        formatted_index
    );
    let timer = Instant::now();
    // black_box keeps the optimizer from eliding the unused result.
    std::hint::black_box(slow_fibonacci_biguint_iterative(fi));
    println!(
        "It took {:0.3?} to calculate the {}th Fibonacci number",
        timer.elapsed(),
        formatted_index
    );
}
/// Demonstrates the fast-doubling Fibonacci implementation and inspects its
/// memoization cache, asserting that computing F(100) caches exactly 10
/// intermediate values.
fn _test_fast_doubling_fibonacci_bigint() {
    println!("Testing fast doubling Fibonacci bigint function");
    let fibonacci = memoized_fast_doubling_fibonacci_biguint(100);
    println!("The 100th Fibonacci number is: {}", fibonacci);
    let cache = zeck::FAST_DOUBLING_FIBONACCI_BIGUINT_CACHE
        .read()
        .expect("Failed to read fast doubling Fibonacci cache");
    println!(
        "Querying the 100th Fibonacci number, using the fast doubling algorithm, generated only {} cached Fibonacci numbers",
        cache.len()
    );
    // Fast doubling touches only O(log n) indices, hence the small cache.
    assert_eq!(cache.len(), 10);
    // Print the cache entries in index order for readability.
    let mut entries: Vec<_> = cache.iter().collect();
    entries.sort_by_key(|(fi, _)| *fi);
    for (fi, value) in entries {
        println!(
            "The {fi}th Fibonacci number, using the fast doubling algorithm, is: {}",
            value
        );
    }
}
/// Measures how much all-ones "compressed" input (0xFF bytes) expands to
/// when decompressed, for sizes 10, 100, 1_000, and 10_000 bytes.
fn _all_ones_decompressions() {
    let mut all_ones_byte_size = 10;
    // Grow the test size by 10x each iteration (old name had a typo:
    // `size_multipier`).
    let growth_factor = 10;
    let max_byte_size = 10_000;
    while all_ones_byte_size <= max_byte_size {
        let iteration_start = Instant::now();
        println!("Testing all ones byte size: {}", all_ones_byte_size);
        // All-ones bytes are the densest possible Zeckendorf payload.
        let mock_compressed_all_ones_data = vec![0xFF; all_ones_byte_size];
        let mock_decompressed_data =
            padless_zeckendorf_decompress_be_dangerous(&mock_compressed_all_ones_data);
        println!(
            "Mock decompressed data byte size: {:?}",
            mock_decompressed_data.len()
        );
        println!(
            "Mock decompressed data raw bit size: {:?}",
            mock_decompressed_data.len() * 8
        );
        let size_ratio =
            mock_compressed_all_ones_data.len() as f64 / mock_decompressed_data.len() as f64;
        println!("Size ratio: {x:0.3}", x = size_ratio);
        println!(
            "If an input data happens to be Zeckendorf compressed as all ones of size {all_ones_byte_size} bytes, the decompressed data will be {} bytes",
            mock_decompressed_data.len()
        );
        println!(
            "This means the input data was compressed by {x:0.3}%",
            x = (1.0 - size_ratio) * 100.0
        );
        println!(
            "In other words, the compressed data was {x:0.3}% of the original data",
            x = size_ratio * 100.0
        );
        println!(
            "In other other words, the decompressed data was {x:0.3}% of the size of the compressed data",
            x = 1.0 / size_ratio * 100.0
        );
        println!(
            "Time taken to test all ones decompression for byte size {all_ones_byte_size}: {:?}",
            iteration_start.elapsed()
        );
        all_ones_byte_size *= growth_factor;
    }
}
/// Prints successive all-ones Zeckendorf numbers for bit counts 2..=46 and
/// compares each consecutive growth ratio against phi squared.
fn _test_all_ones_zeckendorf_ratios() {
    let timer = Instant::now();
    let mut prev = all_ones_zeckendorf_to_biguint(1);
    for i in 2..=46 {
        let curr = all_ones_zeckendorf_to_biguint(i);
        println!("The {i}th all ones Zeckendorf number is: {}", curr);
        // Consecutive all-ones Zeckendorf numbers grow by roughly phi^2.
        let ratio = biguint_to_approximate_f64(&curr) / biguint_to_approximate_f64(&prev);
        println!(
            "The {i}th all ones Zeckendorf number is {ratio} times larger than the {}th all ones Zeckendorf number",
            i - 1
        );
        let delta = ratio - PHI_SQUARED;
        println!("The delta between the ratio and phi squared is {delta}");
        prev = curr;
    }
    println!(
        "Time taken to test all ones Zeckendorf ratio: {:?}",
        timer.elapsed()
    );
}
/// Approximates a `BigUint` as an `f64`.
///
/// Values that fit in a single 64-bit limb convert exactly (up to f64
/// rounding). Larger values keep their top 64 bits of mantissa and are
/// scaled back up, with the exponent capped so the result is always finite.
///
/// The previous implementation returned `2^bits` for any multi-limb value,
/// which discarded the entire mantissa and overestimated by up to 2x; this
/// version preserves ~53 significant bits while keeping the same finite cap
/// (never more than ~2^1023) for astronomically large inputs.
fn biguint_to_approximate_f64(value: &BigUint) -> f64 {
    let digits = value.to_u64_digits();
    if digits.is_empty() {
        // BigUint zero has no limbs.
        0.0
    } else if digits.len() == 1 {
        // Fits in one u64 limb: direct (correctly rounded) conversion.
        digits[0] as f64
    } else {
        // Multi-limb: shift so the most significant 64 bits land in a
        // single limb, convert that, then scale by 2^shift.
        let bits = value.bits();
        let shift = bits - 64; // >= 1 because there are at least two limbs
        let top = (value >> shift).to_u64_digits()[0] as f64;
        // Cap the exponent so top (< 2^64) times the scale stays < 2^1023,
        // matching the old behavior of never returning infinity.
        let capped_shift = shift.min(1023 - 64);
        top * 2_f64.powf(capped_shift as f64)
    }
}
/// Compares powers of phi squared against all-ones Zeckendorf numbers of the
/// same index, tracking how the ratio between them drifts from step to step.
fn test_phi_squared_and_all_ones_zeckendorf_ratios() {
    let timer = Instant::now();
    // Seed the previous ratio with the i = 1 case.
    let mut prev_ratio =
        PHI_SQUARED / biguint_to_approximate_f64(&all_ones_zeckendorf_to_biguint(1));
    for i in 2..=46 {
        let phi_squared_i = PHI_SQUARED.powi(i as i32);
        println!("The {i}th phi squared is: {phi_squared_i}");
        let curr = all_ones_zeckendorf_to_biguint(i);
        println!("The {i}th all ones Zeckendorf number is: {}", curr);
        let ratio = phi_squared_i / biguint_to_approximate_f64(&curr);
        println!(
            "The {i}th phi squared is {ratio} times larger than {i}th all ones Zeckendorf number"
        );
        let ratio_delta = ratio - prev_ratio;
        println!("The delta between the ratio and the previous ratio is {ratio_delta}");
        let ratio_growth_rate = ratio_delta / prev_ratio;
        println!("The growth rate of the ratio is {ratio_growth_rate}");
        prev_ratio = ratio;
    }
    println!(
        "Time taken to test phi squared and all ones Zeckendorf ratio: {:?}",
        timer.elapsed()
    );
}
/// Pairs an all-ones Zeckendorf bit count with its compression ratio
/// (Zeckendorf bits / binary bits). Ordered by the ratio.
#[derive(Debug)]
struct AllOnesZeckendorfBitsToBinaryBitsRatio {
    // Number of Zeckendorf bits in the all-ones number.
    all_ones_zeckendorf_bit_count: usize,
    // Zeckendorf bit count divided by binary bit count (smaller = better).
    ratio: f64,
}
impl PartialEq for AllOnesZeckendorfBitsToBinaryBitsRatio {
    // Derive equality from `cmp` so Eq and Ord can never disagree.
    fn eq(&self, other: &Self) -> bool {
        self.cmp(other) == std::cmp::Ordering::Equal
    }
}
impl Eq for AllOnesZeckendorfBitsToBinaryBitsRatio {}
impl PartialOrd for AllOnesZeckendorfBitsToBinaryBitsRatio {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for AllOnesZeckendorfBitsToBinaryBitsRatio {
    // `f64::total_cmp` provides a true total order over floats (IEEE 754
    // totalOrder), handling NaN and signed zero consistently. The previous
    // hand-written comparison returned Ordering::Equal for any NaN operand
    // while eq() returned false — violating the Eq/Ord contract and making
    // sort results unspecified whenever a NaN ratio appeared.
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.ratio.total_cmp(&other.ratio)
    }
}
/// Ranks all-ones Zeckendorf numbers by the ratio of their Zeckendorf bit
/// count to their binary bit count, then prints the 20 smallest ratios
/// (the most "compressive" cases).
fn test_all_ones_zeckendorf_bits_to_binary_bits_ratios() {
    let start_time = Instant::now();
    let mut all_ratios: Vec<AllOnesZeckendorfBitsToBinaryBitsRatio> = Vec::new();
    // Start at 1 rather than 0: the zero-bit all-ones number is 0, whose
    // binary bit count is also 0, so the ratio would be 0/0 = NaN and
    // poison the sort below.
    for i in 1..5_000 {
        let all_ones_zeckendorf_bit_count = i;
        let all_ones_zeckendorf = all_ones_zeckendorf_to_biguint(all_ones_zeckendorf_bit_count);
        let compression_ratio =
            all_ones_zeckendorf_bit_count as f64 / all_ones_zeckendorf.bits() as f64;
        all_ratios.push(AllOnesZeckendorfBitsToBinaryBitsRatio {
            all_ones_zeckendorf_bit_count: i,
            ratio: compression_ratio,
        });
    }
    // Ascending by ratio: smallest (best) first.
    all_ratios.sort();
    for (index, ratio) in all_ratios.iter().take(20).enumerate() {
        let all_ones_zeckendorf_value =
            all_ones_zeckendorf_to_biguint(ratio.all_ones_zeckendorf_bit_count);
        let binary_value_bit_count = all_ones_zeckendorf_value.bits();
        println!(
            "The {index}th best all ones Zeckendorf to binary bits ratio is {ratio:.10} with AOZN bit count: {all_ones_zeckendorf_bit_count}, compared to binary value: {all_ones_zeckendorf_value} which needs {binary_value_bit_count} bits",
            index = index + 1,
            ratio = ratio.ratio,
            all_ones_zeckendorf_bit_count = ratio.all_ones_zeckendorf_bit_count
        );
    }
    let end_time = Instant::now();
    println!(
        "Time taken to test all ones Zeckendorf bits to binary bits ratios: {:?}",
        end_time.duration_since(start_time)
    );
}
/// Decompresses many random buffers looking for one whose decompressed form
/// is smaller than the input — i.e. an input the Zeckendorf scheme would
/// have compressed. Stops and reports details on the first hit.
fn _test_decompressing_large_random_data() {
    let search_start = Instant::now();
    let num_bytes = 1_000;
    let num_tests = 10_000;
    println!(
        "Searching for a case where the decompressed data is smaller than the original data of size {num_bytes} bytes..."
    );
    // Seeded RNG keeps the search reproducible run to run.
    let mut rng = StdRng::seed_from_u64(RNG_SEED);
    for i in 0..num_tests {
        let mut data = vec![0u8; num_bytes];
        rng.fill_bytes(&mut data);
        let decompressed = padless_zeckendorf_decompress_be_dangerous(&data);
        let decompressed_bits = decompressed.len() * 8;
        let original_bits = data.len() * 8;
        // Keep searching until decompression shrinks the data.
        if decompressed_bits >= original_bits {
            continue;
        }
        let bit_ratio = decompressed_bits as f64 / original_bits as f64;
        println!(
            "Found a case where the decompressed data is smaller than the original data, on test {i} of {num_tests}!"
        );
        println!("Original data size: {:?} bytes", data.len());
        println!("Decompressed data size: {:?} bytes", decompressed.len());
        println!("Original data raw bit size: {:?} bits", original_bits);
        println!("Decompressed data raw bit size: {:?} bits", decompressed_bits);
        println!(
            "Decompressed data raw bit size ratio to original data raw bit size: {:?}",
            bit_ratio
        );
        println!(
            "Decompressed data raw bit size ratio to original data raw bit size percentage: {:?}%",
            bit_ratio * 100.0
        );
        println!(
            "The difference is {:?} bits",
            original_bits - decompressed_bits
        );
        println!(
            "The difference percentage is {:?}%",
            (original_bits - decompressed_bits) as f64 / original_bits as f64 * 100.0
        );
        println!("Original data: {:X?}", data);
        println!("Decompressed data: {:X?}", decompressed);
        println!(
            "Found after {i} tests, and time: {:?}",
            search_start.elapsed()
        );
        return;
    }
    println!(
        "Ran {num_tests} tests, and in none of them did the decompressed data end up being smaller than the original data"
    );
    println!(
        "Time taken to test decompressing {num_tests} pieces of large random data of size {num_bytes} bytes: {:?}",
        search_start.elapsed()
    );
}
/// Prints the first `max_bits` all-ones Zeckendorf numbers alongside their
/// binary (base-2) representations.
fn print_aozns_as_binary_bits() {
    let max_bits = 20;
    println!("Printing the first {max_bits} all ones Zeckendorf numbers as binary bits:");
    for i in 0..max_bits {
        let all_ones_zeckendorf = all_ones_zeckendorf_to_biguint(i);
        let binary_bit_count = all_ones_zeckendorf.bits();
        // Render each big-endian byte as 8 binary digits, space-separated.
        let mut rendered_bytes: Vec<String> = Vec::new();
        for byte in all_ones_zeckendorf.to_bytes_be() {
            rendered_bytes.push(format!("{:08b}", byte));
        }
        let binary_bits_str = rendered_bytes.join(" ");
        println!(
            "The all ones Zeckendorf number with {i} bits represents the number {all_ones_zeckendorf} which is {binary_bit_count} bits in binary: {binary_bits_str}"
        );
    }
    println!("Finished printing the first {max_bits} all ones Zeckendorf numbers as binary bits");
}
/// Builds the BigUint whose binary representation is `n` one-bits,
/// i.e. 2^n - 1 (0 when n == 0).
fn binary_ones_to_bigint(n: usize) -> BigUint {
    let one = BigUint::one();
    (one.clone() << (n as u64)) - one
}
/// For each bit count up to 20, shows what a run of that many one-bits means
/// when read as plain binary versus as Zeckendorf bits.
fn compare_aozn_binary_bit_interpretations() {
    let max_bits = 20;
    let timer = Instant::now();
    println!("Comparing the bits of all ones Zeckendorf numbers to their binary representation:");
    for i in 0..=max_bits {
        let all_ones_zeckendorf = all_ones_zeckendorf_to_biguint(i);
        let n_binary_ones = binary_ones_to_bigint(i);
        println!(
            "{i} ones ({n_binary_ones:20b}) as binary is {n_binary_ones}; as Zeckendorf bits, it is {all_ones_zeckendorf}"
        );
    }
    println!(
        "Finished comparing the bits of all ones Zeckendorf numbers to their binary representation in {:?}",
        timer.elapsed()
    );
}
/// Round-trips data that begins with zero bytes through compression, to show
/// how the padless scheme handles leading zeros (which a BigUint drops).
fn test_random_data_with_lots_of_leading_zeros() {
    let leading_zero_byte_count = 5;
    let random_byte_count = 10;
    println!(
        "Testing random data with {leading_zero_byte_count} leading zero bytes and {random_byte_count} random bytes..."
    );
    let mut rng = StdRng::seed_from_u64(RNG_SEED);
    // Build [0, 0, 0, 0, 0, r1, ..., r10] in place.
    let mut random_data_with_leading_zeroes = vec![0u8; leading_zero_byte_count];
    for _ in 0..random_byte_count {
        random_data_with_leading_zeroes.push(rng.random_range(0..=255));
    }
    let compressed_data =
        padless_zeckendorf_compress_be_dangerous(&random_data_with_leading_zeroes);
    let decompressed_data = padless_zeckendorf_decompress_be_dangerous(&compressed_data);
    println!(
        "Random data with leading zeroes: {:?}",
        random_data_with_leading_zeroes
    );
    println!(
        "Random data with leading zeroes size: {:?}",
        random_data_with_leading_zeroes.len()
    );
    println!(
        "Random data with leading zeroes compressed: {:?}",
        compressed_data
    );
    println!(
        "Random data with leading zeroes compressed size: {:?}",
        compressed_data.len()
    );
    println!(
        "Random data with leading zeroes decompressed: {:?}",
        decompressed_data
    );
    println!(
        "Random data with leading zeroes decompressed size: {:?}",
        decompressed_data.len()
    );
}
/// Exercises the zeck file-format round trip on a tiny input ([1, 0]) and
/// prints which endianness (if either) produced the smaller output, then
/// decompresses the winner to show the recovered bytes.
fn debug_zeck_file_format() {
    let data = vec![1, 0];
    println!("Original data: {:?}", data);
    // compress_zeck_best tries both byte orders and reports the winner
    // (or Neither when no variant beats the original size).
    let best_compression_result = compress_zeck_best(&data).unwrap();
    match best_compression_result {
        // Big-endian won; `le_size` is the losing little-endian size.
        zeck_file_format::compress::BestCompressionResult::BigEndianBest { zeck_file, le_size } => {
            println!("Big endian best compression result: {:?}", zeck_file);
            println!("Big endian best compression result le size: {:?}", le_size);
            // Round-trip the winning file back to raw bytes.
            let decompressed_data = decompress_zeck_file(&zeck_file).unwrap();
            println!(
                "Big endian best compression result decompressed data: {:?}",
                decompressed_data
            );
            println!(
                "Big endian best compression result decompressed data size: {:?}",
                decompressed_data.len()
            );
        }
        // Little-endian won; `be_size` is the losing big-endian size.
        zeck_file_format::compress::BestCompressionResult::LittleEndianBest {
            zeck_file,
            be_size,
        } => {
            println!("Little endian best compression result: {:?}", zeck_file);
            println!(
                "Little endian best compression result be size: {:?}",
                be_size
            );
            // Round-trip the winning file back to raw bytes.
            let decompressed_data = decompress_zeck_file(&zeck_file).unwrap();
            println!(
                "Little endian best compression result decompressed data: {:?}",
                decompressed_data
            );
            println!(
                "Little endian best compression result decompressed data size: {:?}",
                decompressed_data.len()
            );
        }
        // Neither byte order beat the original: just report both sizes.
        zeck_file_format::compress::BestCompressionResult::Neither { be_size, le_size } => {
            println!("Neither compression method produced a smaller output than the original.");
            println!("Big endian best compression result size: {:?}", be_size);
            println!(
                "Little endian best compression result le size: {:?}",
                le_size
            );
        }
    }
}