// hft_benchmarks/mock_core.rs

//! Mock implementations for testing without hft-core dependency
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};

/// Assumed CPU frequency in MHz when none has been set explicitly (3 GHz).
const DEFAULT_FREQUENCY_MHZ: u64 = 3_000;

static FREQUENCY_MHZ: AtomicU64 = AtomicU64::new(DEFAULT_FREQUENCY_MHZ);

/// Current mock CPU frequency in MHz.
pub fn cpu_frequency_mhz() -> u64 {
    FREQUENCY_MHZ.load(Ordering::Relaxed)
}

/// Override the mock CPU frequency in MHz (used by tests/benchmarks).
pub fn set_cpu_frequency_mhz(freq: u64) {
    FREQUENCY_MHZ.store(freq, Ordering::Relaxed);
}

/// Mock timestamp type for benchmarking
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Timestamp(u64);

impl Timestamp {
    /// Capture the current wall-clock time as nanoseconds since the Unix epoch.
    pub fn now() -> Self {
        let since_epoch = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap();
        Timestamp(since_epoch.as_nanos() as u64)
    }

    /// Raw nanosecond count carried by this timestamp.
    pub fn as_nanos(&self) -> u64 {
        self.0
    }
}

/// Mock price type for benchmarking.
///
/// Stores the value as fixed-point with two decimal places (cents).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Price(i64);

impl Price {
    /// Build a price from a floating-point value.
    ///
    /// Rounds to the nearest cent instead of truncating: the previous
    /// `(value * 100.0) as i64` cast truncated toward zero, so e.g.
    /// `29.99` (whose f64 times 100 is `2998.999…`) lost a cent.
    pub fn new(value: f64) -> Self {
        Self((value * 100.0).round() as i64)
    }

    /// Convert back to a floating-point value.
    pub fn as_f64(&self) -> f64 {
        self.0 as f64 / 100.0
    }
}

impl std::ops::Add for Price {
    type Output = Self;
    /// Exact fixed-point addition (no float error accumulates).
    fn add(self, rhs: Self) -> Self::Output {
        Self(self.0 + rhs.0)
    }
}

impl std::ops::Sub for Price {
    type Output = Self;
    /// Exact fixed-point subtraction; may go negative (i64 backing).
    fn sub(self, rhs: Self) -> Self::Output {
        Self(self.0 - rhs.0)
    }
}

/// Mock quantity type for benchmarking (whole units, unsigned).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Quantity(u64);

impl Quantity {
    /// Wrap a raw unit count.
    pub fn new(value: u64) -> Self {
        Self(value)
    }

    /// Raw unit count.
    pub fn as_u64(&self) -> u64 {
        self.0
    }
}

impl std::ops::Add for Quantity {
    type Output = Self;
    /// Saturating addition, for consistency with `Sub` below: the previous
    /// unchecked `+` panicked on overflow in debug builds while `Sub`
    /// silently saturated.
    fn add(self, rhs: Self) -> Self::Output {
        Self(self.0.saturating_add(rhs.0))
    }
}

impl std::ops::Sub for Quantity {
    type Output = Self;
    /// Saturating subtraction: quantities never go negative.
    fn sub(self, rhs: Self) -> Self::Output {
        Self(self.0.saturating_sub(rhs.0))
    }
}

/// Mock SPSC (single-producer, single-consumer) ring buffer for benchmarking.
///
/// One slot is always kept empty to distinguish "full" from "empty", so the
/// buffer holds at most the requested `capacity` items.
pub struct SPSCRingBuffer<T> {
    /// Slot storage. `UnsafeCell` is required because `push`/`pop` mutate
    /// slots through `&self`; the previous version cast `Vec::as_ptr()` to
    /// `*mut` behind a shared reference, which is undefined behavior.
    buffer: Vec<UnsafeCell<Option<T>>>,
    /// Producer index: next slot to write.
    head: AtomicUsize,
    /// Consumer index: next slot to read.
    tail: AtomicUsize,
    /// Slot count = requested capacity + 1 (one sentinel slot).
    capacity: usize,
}

impl<T> SPSCRingBuffer<T> {
    /// Create a buffer able to hold `capacity` items.
    pub fn new(capacity: usize) -> Self {
        let mut buffer = Vec::with_capacity(capacity + 1);
        for _ in 0..=capacity {
            buffer.push(UnsafeCell::new(None));
        }

        Self {
            buffer,
            head: AtomicUsize::new(0),
            tail: AtomicUsize::new(0),
            capacity: capacity + 1,
        }
    }

    /// Producer side: enqueue `item`. Returns `false` when the buffer is full.
    pub fn push(&self, item: T) -> bool {
        let head = self.head.load(Ordering::Acquire);
        let next_head = (head + 1) % self.capacity;

        if next_head == self.tail.load(Ordering::Acquire) {
            false // Full
        } else {
            // SAFETY: only the single producer writes slot `head`, and the
            // consumer will not read it until the Release store below
            // publishes the new head index.
            unsafe {
                *self.buffer[head].get() = Some(item);
            }
            self.head.store(next_head, Ordering::Release);
            true
        }
    }

    /// Consumer side: dequeue the oldest item, or `None` when empty.
    pub fn pop(&self) -> Option<T> {
        let tail = self.tail.load(Ordering::Acquire);
        if tail == self.head.load(Ordering::Acquire) {
            None // Empty
        } else {
            // SAFETY: only the single consumer reads slot `tail`. `take()`
            // leaves `None` behind, so the value cannot be dropped twice —
            // the previous `ptr::read` left a bitwise duplicate in the slot
            // that was dropped again when the Vec was dropped.
            let item = unsafe { (*self.buffer[tail].get()).take() };
            self.tail.store((tail + 1) % self.capacity, Ordering::Release);
            item
        }
    }
}

// SAFETY: the SPSC protocol partitions slot access between exactly one
// producer and one consumer, synchronized through the atomic indices.
unsafe impl<T: Send> Send for SPSCRingBuffer<T> {}
unsafe impl<T: Send> Sync for SPSCRingBuffer<T> {}

/// Mock "wait-free" hash table for benchmarking.
///
/// NOTE(review): despite the name, this mock is lock-based (one `RwLock` per
/// bucket). That is fine as benchmarking scaffolding, but it is not actually
/// wait-free — confirm callers do not rely on progress guarantees.
pub struct WaitFreeHashTable<K, V> {
    buckets: Vec<std::sync::RwLock<Vec<(K, V)>>>,
    /// Bucket count; always >= 1 and equal to `buckets.len()`.
    capacity: usize,
}

impl<K: Clone + Eq + std::hash::Hash, V: Clone> WaitFreeHashTable<K, V> {
    /// Create a table with `capacity` buckets.
    ///
    /// A requested capacity of 0 is clamped to 1, so `hash` never performs
    /// a division by zero (the previous version panicked on `new(0)`).
    pub fn new(capacity: usize) -> Self {
        let capacity = capacity.max(1);
        let mut buckets = Vec::with_capacity(capacity);
        for _ in 0..capacity {
            buckets.push(std::sync::RwLock::new(Vec::new()));
        }

        Self { buckets, capacity }
    }

    /// Map a key to its bucket index.
    fn hash(&self, key: &K) -> usize {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::Hasher;

        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        (hasher.finish() % self.capacity as u64) as usize
    }

    /// Insert or update a key.
    ///
    /// Returns `true` when the key was newly inserted, `false` when an
    /// existing entry was overwritten.
    pub fn insert(&self, key: K, value: V) -> bool {
        let bucket_idx = self.hash(&key);
        let mut bucket = self.buckets[bucket_idx].write().unwrap();

        // Update existing or insert new
        for (k, v) in bucket.iter_mut() {
            if k == &key {
                *v = value;
                return false; // Updated existing
            }
        }

        bucket.push((key, value));
        true // Inserted new
    }

    /// Clone out the value stored for `key`, if any.
    pub fn get(&self, key: &K) -> Option<V> {
        let bucket_idx = self.hash(key);
        let bucket = self.buckets[bucket_idx].read().unwrap();

        bucket.iter()
            .find(|(k, _)| k == key)
            .map(|(_, v)| v.clone())
    }
}

/// Mock object pool for benchmarking.
///
/// Recycled objects are kept on a mutex-guarded free list; `get` falls back
/// to the caller's constructor when the list is empty or the lock is poisoned.
pub struct ObjectPool<T> {
    objects: std::sync::Mutex<Vec<T>>,
}

impl<T> Default for ObjectPool<T> {
    fn default() -> Self {
        Self::new()
    }
}

impl<T> ObjectPool<T> {
    /// Empty pool with no preallocated free-list storage.
    pub fn new() -> Self {
        Self { objects: std::sync::Mutex::new(Vec::new()) }
    }

    /// Empty pool whose free list can hold `capacity` objects without reallocating.
    pub fn with_capacity(capacity: usize) -> Self {
        Self { objects: std::sync::Mutex::new(Vec::with_capacity(capacity)) }
    }

    /// Take a recycled object, or build a fresh one via `create_fn`.
    pub fn get(&self, create_fn: impl FnOnce() -> T) -> T {
        match self.objects.lock() {
            Ok(mut free_list) => free_list.pop().unwrap_or_else(create_fn),
            // Poisoned lock: best-effort mock, just construct a new object.
            Err(_) => create_fn(),
        }
    }

    /// Return an object to the pool for later reuse (dropped if the lock is poisoned).
    pub fn put(&self, obj: T) {
        if let Ok(mut free_list) = self.objects.lock() {
            free_list.push(obj);
        }
    }
}

/// Mock NUMA arena allocator for benchmarking.
///
/// The mock performs ordinary heap allocation and merely remembers the node
/// id it was constructed with — there is no real NUMA placement here.
pub struct NumaArenaAllocator {
    node_id: usize,
}

impl NumaArenaAllocator {
    /// Create an allocator nominally "pinned" to `node_id`.
    pub fn new(node_id: usize) -> Self {
        Self { node_id }
    }

    /// Hand out a zero-filled buffer of `size` bytes.
    pub fn allocate(&self, size: usize) -> Vec<u8> {
        // Plain heap allocation stands in for NUMA-local allocation.
        vec![0u8; size]
    }

    /// The NUMA node id this allocator was created for.
    pub fn node_id(&self) -> usize {
        self.node_id
    }
}