//! utls 0.12.10
//!
//! A simple utilities library for things I actually use, with a large focus
//! on convenience and a lack of dependencies.
//!
//! Documentation: see the crate-level docs.
#![feature(step_trait)]
use std::{iter::Step, u64};
use utls::numerics::{UInt, random::LCG};

// Demonstration / smoke test of the `utls` crate: the arbitrary-width
// unsigned integer type `UInt` and the bundled `LCG` random generator.
// The expected result of each operation is noted beside its printout.
fn main() {
    // Basic initialization
    let a = UInt::new(100u128);
    let b = UInt::new(50u128);
    println!("a: {}, b: {}", a, b);

    // Addition
    // NOTE(review): the arithmetic/bitwise operators appear to take both
    // operands by value, hence the clones throughout — confirm against the
    // `UInt` operator impls.
    let sum = a.clone() + b.clone();
    println!("a + b = {}", sum); // 150

    // Subtraction
    let diff = a.clone() - b.clone();
    println!("a - b = {}", diff); // 50

    // Multiplication
    let prod = a.clone() * b.clone();
    println!("a * b = {}", prod); // 5000

    // Division
    let quot = a.clone() / b.clone();
    println!("a / b = {}", quot); // 2

    // Remainder
    let rem = a.clone() % b.clone();
    println!("a % b = {}", rem); // 0

    // Bitwise operations
    let c = UInt::new(0b1100u128); // 12 in binary
    let d = UInt::new(0b1010u128); // 10 in binary

    let and = c.clone() & d.clone();
    println!("c & d = {}", and); // 8 (1000 in binary)

    let or = c.clone() | d.clone();
    println!("c | d = {}", or); // 14 (1110 in binary)

    let xor = c.clone() ^ d.clone();
    println!("c ^ d = {}", xor); // 6 (0110 in binary)

    // Bit shifts
    let left_shift = c.clone() << 2;
    println!("c << 2 = {}", left_shift); // 48 (110000 in binary)

    let right_shift = c.clone() >> 1;
    println!("c >> 1 = {}", right_shift); // 6 (0110 in binary)

    // Assignment operations (each line mutates `x` in place)
    let mut x = UInt::new(100u128);

    x += UInt::new(50u128);
    println!("x after += 50: {}", x); // 150

    x -= UInt::new(30u128);
    println!("x after -= 30: {}", x); // 120

    x *= UInt::new(2u128);
    println!("x after *= 2: {}", x); // 240

    x /= UInt::new(4u128);
    println!("x after /= 4: {}", x); // 60

    x %= UInt::new(7u128);
    println!("x after %= 7: {}", x); // 4 (60 % 7)

    // Bitwise assignment operations
    let mut y = UInt::new(0b1100u128); // 12 in binary

    y &= UInt::new(0b1010u128);
    println!("y after &= 0b1010: {}", y); // 8 (0b1000)

    y |= UInt::new(0b0011u128);
    println!("y after |= 0b0011: {}", y); // 11 (0b1011)

    y ^= UInt::new(0b1111u128);
    println!("y after ^= 0b1111: {}", y); // 4 (0b0100)

    // Shift assignment operations
    y <<= 2;
    println!("y after <<= 2: {}", y); // 16

    y >>= 1;
    println!("y after >>= 1: {}", y); // 8

    // Equality comparison
    let e1 = UInt::new(100u128);
    let e2 = UInt::new(100u128);
    let e3 = UInt::new(200u128);

    println!("e1 == e2: {}", e1 == e2); // true
    println!("e1 == e3: {}", e1 == e3); // false

    // Power operation
    let base = UInt::new(2u128);
    let result = base.pow(3); // 2^3
    println!("\n\n\n2^3 = {}", result); // 8

    // Scientific notation (multiply by 10^exp)
    let num = UInt::new(5u128);
    let sci_result = num.sci(2); // 5 * 10^2
    println!("5 * 10^2 = {}", sci_result); // 500

    // Static constructors — the argument is the bit width of the new value
    let zero = UInt::zero(8); // 8-bit zero
    let one = UInt::one(8); // 8-bit one
    let ten = UInt::ten(8); // 8-bit ten
    let max = UInt::max(8); // 8-bit maximum value (255)
    println!(
        "8-bit: zero: {}, one: {}, ten: {}, max: {}",
        zero, one, ten, max
    );

    // Static constructors at a much larger width
    let zero = UInt::zero(1024); // 1024-bit zero
    let one = UInt::one(1024); // 1024-bit one
    let ten = UInt::ten(1024); // 1024-bit ten
    let max = UInt::max(1024); // 1024-bit maximum value (2^1024 - 1)
    //max.dbg_disp_enabled = true;
    println!(
        "1024-bit: zero: {}, one: {}, ten: {}, max: {}",
        zero, one, ten, max
    );

    // Stress test — left disabled: a 64-billion-bit value is extremely
    // slow/memory-hungry to construct and display.
    // println!("\n\n\n\nOh god,it's happening...");
    // let max = UInt::max(128000000000 / 2);
    // println!("{max}");

    // String parsing
    let hex = UInt::from_hex("FF").unwrap();
    let binary = UInt::from_binary("1010").unwrap();
    let custom = UInt::from_str_radix("123", 10).unwrap();
    println!(
        "From: hex FF = {}, binary 1010 = {}, decimal 123 = {}",
        hex, binary, custom
    );

    // Binary display mode — `dbg_disp_enabled` toggles Display output
    // between binary and decimal representations.
    let mut num = UInt::new(12u128);
    num.dbg_disp_enabled = true;
    println!("12 in binary: {}", num); // Will show binary representation
    num.dbg_disp_enabled = false;
    println!("12 in decimal: {}", num); // Will show decimal representation

    // Dynamic resizing control
    let mut num = UInt::new(255u128);
    num.dynamic_resizing = false; // Disable dynamic resizing
    // With resizing disabled the value cannot grow past its current width,
    // so the following would panic due to overflow:
    // num *= UInt::new(2u128);

    // Comparison operations
    let n1 = UInt::new(100u128);
    let n2 = UInt::new(200u128);
    println!("n1 < n2: {}", n1 < n2); // true
    println!("n1 <= n2: {}", n1 <= n2); // true
    println!("n1 > n2: {}", n1 > n2); // false
    println!("n1 >= n2: {}", n1 >= n2); // false

    // Type conversion — fallible because the value may not fit the target
    let u128_val: u128 = n1.clone().try_into().unwrap();
    let u64_val: u64 = n1.clone().try_into().unwrap();
    let u32_val: u32 = n1.clone().try_into().unwrap();
    let u16_val: u16 = n1.clone().try_into().unwrap();
    let u8_val: u8 = n1.clone().try_into().unwrap();
    println!(
        "Converted values: {}, {}, {}, {}, {}",
        u128_val, u64_val, u32_val, u16_val, u8_val
    );

    // From integer conversions
    let from_i8 = UInt::from(-8i8); // Takes absolute value
    let from_i16 = UInt::from(-16i16); // Takes absolute value
    let from_i32 = UInt::from(-32i32); // Takes absolute value
    println!("From signed: {}, {}, {}", from_i8, from_i16, from_i32);

    // Step trait usage (nightly `step_trait` feature, enabled at file top)
    let start = UInt::new(5u128);
    let end = UInt::new(10u128);
    let steps = UInt::steps_between(&start, &end);
    println!("Steps between 5 and 10: {:?}", steps);

    let next = UInt::forward_checked(start.clone(), 3);
    let prev = UInt::backward_checked(start.clone(), 2);
    println!(
        "(From 5) Forward 3 steps: {}, Backward 2 steps: {}",
        next.unwrap(), // 8
        prev.unwrap()  // 3
    );

    // More advanced math funcs
    let hund = UInt::new(100u128);
    let two = UInt::new(2u128);
    let one = UInt::one(8);
    let thirteen = UInt::new(13u128);

    // NOTE(review): presumably an integer (floor) square root, giving
    // 10, 1, 1, 3 — confirm against the `sqrt` impl.
    println!(
        "Square root of: hundred: {}, two: {}, one: {}, thirteen: {}",
        UInt::sqrt(hund),
        UInt::sqrt(two),
        UInt::sqrt(one),
        UInt::sqrt(thirteen)
    );
    // Logarithm operations
    let num = UInt::new(100u128);
    let base = UInt::new(8u128);

    // Log with custom base
    let log_result = num.log(&base);
    println!("log_8(100) = {}", log_result.unwrap()); // Should be approximately 2.214

    // Natural logarithm
    let ln_result = num.ln();
    println!("ln(100) = {}", ln_result.unwrap()); // Should be approximately 4.605

    // Base-2 logarithm
    let log2_result = num.log2();
    println!("log_2(100) = {}", log2_result.unwrap()); // Should be approximately 6.643

    // Base-10 logarithm
    let log10_result = num.log10();
    println!("log_10(100) = {}", log10_result.unwrap()); // Should be approximately 2.0

    // Random number generation — the argument is the bit width
    let random_8bit = UInt::random(8);
    println!("Random 8-bit number: {}", random_8bit);

    let random_16bit = UInt::random(16);
    println!("Random 16-bit number: {}", random_16bit);

    let random_1024bit = UInt::random(1024);
    println!("Random 1024-bit number: {}", random_1024bit);

    // Random number in range
    let min = UInt::new(10u128);
    let max = UInt::new(20u128);
    let random_in_range = UInt::random_in_range(16, min, max);
    println!("Random 16-bit number between 10 and 20: {}", random_in_range);

    // Using LCG directly
    let mut lcg = LCG::new(12345); // Fixed seed for reproducibility
    println!("Random u64: {}", lcg.next_u64());
    println!("Random u8: {}", lcg.next_u8());
    println!(
        "Random u8 in range (10-20): {}",
        lcg.next_u8_in_range(10, 20)
    );
}