//! phastft 0.3.0
//!
//! A high-performance, quantum-inspired implementation of the FFT in pure Rust.
use num_traits::Float;

const LOG_BLOCK_WIDTH: usize = 7; // log2(128)
const BLOCK_WIDTH: usize = 128;

/// Pre-computed bit reversal table for 8-bit values
const BIT_REV_TABLE_8: [u8; 256] = [
    0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,
    0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,
    0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
    0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,
    0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,
    0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
    0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,
    0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,
    0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
    0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,
    0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,
    0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
    0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
    0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,
    0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
    0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF,
];
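
// Compile-time spot checks (added for illustration, not part of the original table):
// the entry at index `i` is `i` with its eight bits reversed, e.g. 0x01 -> 0x80.
const _: () = assert!(BIT_REV_TABLE_8[0x01] == 0x80);
const _: () = assert!(BIT_REV_TABLE_8[0x0F] == 0xF0);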

/// Bit-reverse the lowest `bits` bits of `val`.
/// Written as a `const fn` so lookup tables can be generated at compile time.
#[inline(always)]
const fn bit_reverse_const(val: usize, bits: usize) -> usize {
    if bits == 0 {
        return 0;
    }
    let mut result = 0;
    let mut v = val;
    let mut b = bits;
    
    while b > 0 {
        result = (result << 1) | (v & 1);
        v >>= 1;
        b -= 1;
    }
    result
}

/// Generate bit reversal table at compile time
const fn generate_bit_rev_table<const SIZE: usize, const BITS: usize>() -> [usize; SIZE] {
    let mut table = [0; SIZE];
    let mut i = 0;
    while i < SIZE {
        table[i] = bit_reverse_const(i, BITS);
        i += 1;
    }
    table
}

// Pre-computed tables for common block sizes
const BIT_REV_TABLE_128: [usize; 128] = generate_bit_rev_table::<128, 7>();
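
// Spot checks on the generated 7-bit table (added for illustration):
// 0b0000001 reverses to 0b1000000 and 0b0000011 reverses to 0b1100000.
const _: () = assert!(BIT_REV_TABLE_128[1] == 64);
const _: () = assert!(BIT_REV_TABLE_128[3] == 96);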

/// Fast inline bit reversal using lookup tables
#[inline(always)]
fn fast_bit_reverse(val: usize, bits: usize) -> usize {
    match bits {
        0..=7 => (BIT_REV_TABLE_8[val & 0xFF] as usize) >> (8 - bits),
        8 => BIT_REV_TABLE_8[val & 0xFF] as usize,
        9..=16 => {
            let lo = BIT_REV_TABLE_8[val & 0xFF] as usize;
            let hi = BIT_REV_TABLE_8[(val >> 8) & 0xFF] as usize;
            ((lo << 8) | hi) >> (16 - bits)
        }
        _ => val.reverse_bits() >> (usize::BITS as usize - bits),
    }
}

/// Optimized COBRA implementation with pattern-based swapping
#[multiversion::multiversion(targets("x86_64+avx512f+avx512bw+avx512cd+avx512dq+avx512vl", // x86_64-v4
                                     "x86_64+avx2+fma", // x86_64-v3
                                     "x86_64+sse4.2", // x86_64-v2
                                     "x86+avx512f+avx512bw+avx512cd+avx512dq+avx512vl",
                                     "x86+avx2+fma",
                                     "x86+sse4.2",
                                     "x86+sse2",
                                     "aarch64+neon", // ARM64 with NEON (Apple Silicon M1/M2)
))]
pub fn cobra_apply_optimized<T: Default + Copy + Clone>(v: &mut [T], log_n: usize) {
    if log_n <= 2 * LOG_BLOCK_WIDTH {
        // For small sizes, use simple bit reversal
        simple_bit_rev(v, log_n);
        return;
    }

    let num_b_bits = log_n - 2 * LOG_BLOCK_WIDTH;
    let b_size: usize = 1 << num_b_bits;
    
    // Use const table for block width = 128
    const BLOCK_REV: &[usize] = &BIT_REV_TABLE_128;
    
    // Pre-compute the b reversals only when the table is small enough to stay cache-resident
    let use_b_table = b_size <= 4096; // 4096 usize entries = 32 KiB
    
    let b_rev_table: Vec<usize> = if use_b_table {
        (0..b_size).map(|b| fast_bit_reverse(b, num_b_bits)).collect()
    } else {
        Vec::new()
    };

    // Stack-allocated buffer for cache block
    let mut buffer = [T::default(); BLOCK_WIDTH * BLOCK_WIDTH];

    for b in 0..b_size {
        let b_rev = if use_b_table {
            b_rev_table[b]
        } else {
            fast_bit_reverse(b, num_b_bits)
        };

        // Phase 1: Load block into buffer with bit-reversed indexing
        for a in 0..BLOCK_WIDTH {
            let a_rev = BLOCK_REV[a];
            let src_base = (a << num_b_bits << LOG_BLOCK_WIDTH) | (b << LOG_BLOCK_WIDTH);
            let dst_base = a_rev << LOG_BLOCK_WIDTH;
            
            // Copy contiguous chunk for better cache performance
            for c in 0..BLOCK_WIDTH {
                buffer[dst_base | c] = v[src_base | c];
            }
        }

        // Phase 2: Swap with bit-reversed block
        for c in 0..BLOCK_WIDTH {
            let c_rev = BLOCK_REV[c];
            
            for a_rev in 0..BLOCK_WIDTH {
                let a = BLOCK_REV[a_rev];
                
                // Only swap if index < reversed_index to avoid double swapping
                let should_swap = a < c_rev 
                    || (a == c_rev && b < b_rev) 
                    || (a == c_rev && b == b_rev && a_rev < c);
                
                if should_swap {
                    let v_idx = (c_rev << num_b_bits << LOG_BLOCK_WIDTH) 
                        | (b_rev << LOG_BLOCK_WIDTH) 
                        | a_rev;
                    let b_idx = (a_rev << LOG_BLOCK_WIDTH) | c;
                    
                    // Swap the element of `v` with its bit-reversed counterpart staged in `buffer`
                    std::mem::swap(&mut v[v_idx], &mut buffer[b_idx]);
                }
            }
        }

        // Phase 3: Write back modified buffer
        for a in 0..BLOCK_WIDTH {
            let a_rev = BLOCK_REV[a];
            
            for c in 0..BLOCK_WIDTH {
                let c_rev = BLOCK_REV[c];
                
                let should_copy = a < c_rev 
                    || (a == c_rev && b < b_rev) 
                    || (a == c_rev && b == b_rev && a_rev < c);
                
                if should_copy {
                    let v_idx = (a << num_b_bits << LOG_BLOCK_WIDTH) 
                        | (b << LOG_BLOCK_WIDTH) 
                        | c;
                    let b_idx = (a_rev << LOG_BLOCK_WIDTH) | c;
                    
                    std::mem::swap(&mut v[v_idx], &mut buffer[b_idx]);
                }
            }
        }
    }
}
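
// The swap-index formulas above rely on a field-splitting identity (spelled out here
// for illustration, with hypothetical field values; not part of the original code):
// for log_n = num_b_bits + 2 * LOG_BLOCK_WIDTH, an index laid out as
// `(a << (num_b_bits + LOG_BLOCK_WIDTH)) | (b << LOG_BLOCK_WIDTH) | c`
// bit-reverses to
// `(rev(c) << (num_b_bits + LOG_BLOCK_WIDTH)) | (rev(b) << LOG_BLOCK_WIDTH) | rev(a)`.
// A compile-time spot check for the smallest COBRA case (num_b_bits = 1, log_n = 15):
const _: () = {
    let num_b_bits = 1;
    let a = 3usize; // top LOG_BLOCK_WIDTH bits
    let b = 1usize; // middle num_b_bits bits
    let c = 5usize; // bottom LOG_BLOCK_WIDTH bits
    let idx = (a << (num_b_bits + LOG_BLOCK_WIDTH)) | (b << LOG_BLOCK_WIDTH) | c;
    let whole = bit_reverse_const(idx, num_b_bits + 2 * LOG_BLOCK_WIDTH);
    let by_fields = (bit_reverse_const(c, LOG_BLOCK_WIDTH) << (num_b_bits + LOG_BLOCK_WIDTH))
        | (bit_reverse_const(b, num_b_bits) << LOG_BLOCK_WIDTH)
        | bit_reverse_const(a, LOG_BLOCK_WIDTH);
    assert!(whole == by_fields);
};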

/// Simple bit reversal for small arrays
#[inline]
fn simple_bit_rev<T>(buf: &mut [T], log_n: usize) {
    let n = 1 << log_n;
    for i in 0..n {
        let j = fast_bit_reverse(i, log_n);
        if i < j {
            buf.swap(i, j);
        }
    }
}

/// Apply the bit-reversal permutation to a complex signal stored as separate real and imaginary slices
#[multiversion::multiversion(targets("x86_64+avx512f+avx512bw+avx512cd+avx512dq+avx512vl",
                                     "x86_64+avx2+fma",
                                     "x86_64+sse4.2",
                                     "x86+avx512f+avx512bw+avx512cd+avx512dq+avx512vl",
                                     "x86+avx2+fma",
                                     "x86+sse4.2",
                                     "x86+sse2",
                                     "aarch64+neon",
))]
pub fn cobra_apply_complex<T: Float + Default + Copy + Clone + Send + Sync>(reals: &mut [T], imags: &mut [T], log_n: usize) {
    if log_n <= 2 * LOG_BLOCK_WIDTH {
        simple_bit_rev_complex(reals, imags, log_n);
        return;
    }

    // For simplicity, process sequentially rather than in parallel
    cobra_apply_optimized(reals, log_n);
    cobra_apply_optimized(imags, log_n);
}

/// Simple bit reversal for complex arrays
#[inline]
fn simple_bit_rev_complex<T>(reals: &mut [T], imags: &mut [T], log_n: usize) {
    let n = 1 << log_n;
    for i in 0..n {
        let j = fast_bit_reverse(i, log_n);
        if i < j {
            reals.swap(i, j);
            imags.swap(i, j);
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_bit_reverse_const() {
        assert_eq!(bit_reverse_const(0b0001, 4), 0b1000);
        assert_eq!(bit_reverse_const(0b0110, 4), 0b0110);
        assert_eq!(bit_reverse_const(0b1010, 4), 0b0101);
    }

    #[test]
    fn test_fast_bit_reverse() {
        for i in 0..256 {
            let rev7 = fast_bit_reverse(i, 7);
            let expected = i.reverse_bits() >> (usize::BITS as usize - 7);
            assert_eq!(rev7, expected, "Failed for i={}", i);
        }
    }
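
    // Added check (not in the original tests): the two-table path used for
    // 9..=16 bits should agree with the generic `reverse_bits` fallback.
    #[test]
    fn test_fast_bit_reverse_wide() {
        for bits in 9..=16 {
            for val in 0..(1usize << bits) {
                let expected = val.reverse_bits() >> (usize::BITS as usize - bits);
                assert_eq!(fast_bit_reverse(val, bits), expected, "bits={bits}, val={val}");
            }
        }
    }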

    #[test]
    fn test_cobra_optimized() {
        for n in 4..20 {
            let size = 1 << n;
            let mut v: Vec<usize> = (0..size).collect();
            let mut expected = v.clone();
            
            cobra_apply_optimized(&mut v, n);
            
            // Verify against simple bit reversal
            for i in 0..size {
                let j = i.reverse_bits() >> (usize::BITS as usize - n);
                if i < j {
                    expected.swap(i, j);
                }
            }
            
            assert_eq!(v, expected, "Failed for n={}", n);
        }
    }
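
    // Added sanity check (not in the original suite): the complex variant must
    // apply the same bit-reversal permutation to both component arrays.
    #[test]
    fn test_cobra_complex() {
        for n in [4usize, 10, 16] {
            let size = 1usize << n;
            let mut reals: Vec<f64> = (0..size).map(|i| i as f64).collect();
            let mut imags: Vec<f64> = (0..size).map(|i| i as f64 * 0.5).collect();

            cobra_apply_complex(&mut reals, &mut imags, n);

            for i in 0..size {
                let j = fast_bit_reverse(i, n);
                // The element originally at index j should now sit at index i, in both halves.
                assert_eq!(reals[i], j as f64, "reals mismatch at i={i} (n={n})");
                assert_eq!(imags[i], j as f64 * 0.5, "imags mismatch at i={i} (n={n})");
            }
        }
    }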
}