1use std::cell::UnsafeCell;
4use std::fmt;
5use std::hash::{Hash, Hasher};
6use std::marker::PhantomData;
7use std::mem::{ManuallyDrop, MaybeUninit};
8
9use parking_lot::lock_api::RawMutex;
10
11use oxidd_core::util::{Borrowed, DropWith};
12use oxidd_core::{ApplyCache, Edge, Manager, ManagerEventSubscriber};
13
// With the `hugealloc` feature enabled, these aliases shadow `std`'s
// `Box`/`Vec` within this module so the entry array is backed by the
// huge-page-aware allocator instead of the global one.
#[cfg(feature = "hugealloc")]
type Box<T> = allocator_api2::boxed::Box<T, hugealloc::HugeAlloc>;
#[cfg(feature = "hugealloc")]
type Vec<T> = allocator_api2::vec::Vec<T, hugealloc::HugeAlloc>;
18
/// Direct-mapped apply cache: every `(operator, operands)` pair hashes (via
/// `H`) to exactly one `Entry` slot, and a colliding insertion simply
/// overwrites the previous occupant (see `EntryGuard::set`).
///
/// `ARITY` bounds the total number of edge + numeric operands per entry.
pub struct DMApplyCache<M: Manager, O, H, const ARITY: usize = 2>(
    // The bucket array; its length is always a power of two (see
    // `with_capacity`), which `bucket` relies on for mask-based indexing.
    Box<[Entry<M, O, ARITY>]>,
    // The hasher type is only used to create fresh hashers; no value stored.
    PhantomData<H>,
);
24
impl<M: Manager, O, H, const ARITY: usize> DropWith<M::Edge> for DMApplyCache<M, O, H, ARITY>
where
    O: Copy + Eq + Hash,
{
    /// Dropping the cache releases no edges: `EntryGuard::clear` only resets
    /// the operand count and cached edges are never dropped individually
    /// anywhere in this module.
    ///
    /// NOTE(review): this presumes the stored edges carry no reference counts
    /// of their own (they are obtained via `Borrowed::into_inner` and kept in
    /// `ManuallyDrop`) — confirm against the `Borrowed` contract.
    fn drop_with(self, _drop_edge: impl Fn(M::Edge)) {
    }
}
33
/// Untagged storage for a single cache operand.
///
/// The discriminating information lives outside the union, in
/// `Entry::edge_operands`/`Entry::numeric_operands`: the first
/// `edge_operands` slots of `Entry::operands` hold `edge`, the following
/// `numeric_operands` slots hold `numeric`, and the rest are `uninit`.
union Operand<E> {
    // Wrapped in `ManuallyDrop` so the union never drops the edge itself;
    // see the `DropWith` impl above.
    edge: ManuallyDrop<E>,
    numeric: u32,
    // Marker variant for slots that have not been written yet.
    uninit: (),
}
39
impl<E> Operand<E> {
    /// An operand slot with no initialized variant.
    const UNINIT: Self = Self { uninit: () };

    /// Read this slot as an edge.
    ///
    /// # Safety
    ///
    /// The `edge` variant must be the one most recently written.
    unsafe fn assume_edge_ref(&self) -> &E {
        // SAFETY: the caller guarantees the `edge` variant is active
        unsafe { &self.edge }
    }

    /// Read this slot as a numeric operand.
    ///
    /// # Safety
    ///
    /// The `numeric` variant must be the one most recently written.
    unsafe fn assume_numeric(&self) -> u32 {
        // SAFETY: the caller guarantees the `numeric` variant is active
        unsafe { self.numeric }
    }

    /// Store `edge` in this slot. Any previous contents are overwritten
    /// without being dropped (the union never drops its fields).
    fn write_edge(&mut self, edge: Borrowed<E>) {
        // `Borrowed::into_inner` yields a `ManuallyDrop<E>` (see how `set`
        // unwraps it with `ManuallyDrop::into_inner`), so the cache stores
        // the edge without assuming ownership of a reference count.
        self.edge = unsafe { Borrowed::into_inner(edge) };
    }

    /// Store a numeric operand in this slot.
    fn write_numeric(&mut self, num: u32) {
        self.numeric = num;
    }
}
66
/// A single cache slot. Every field besides `mutex` sits in an `UnsafeCell`
/// guarded by `mutex`; they must only be accessed while holding an
/// `EntryGuard` (or during the pre_gc/post_gc window, where all locks are
/// held).
struct Entry<M: Manager, O, const ARITY: usize> {
    // Protects all fields below.
    mutex: crate::util::RawMutex,
    // Number of edge operands stored at the start of `operands`.
    // 0 means "slot empty" (see `is_occupied`/`clear`).
    edge_operands: UnsafeCell<u8>,
    // Number of numeric operands stored directly after the edge operands.
    numeric_operands: UnsafeCell<u8>,
    // Operator of the cached application; initialized iff the slot is occupied.
    operator: UnsafeCell<MaybeUninit<O>>,
    // Operand slots: edges first, then numerics, remainder uninitialized.
    operands: UnsafeCell<[Operand<M::Edge>; ARITY]>,
    // Cached result edge; initialized iff the slot is occupied.
    value: UnsafeCell<MaybeUninit<M::Edge>>,
}
83
/// RAII guard proving that the wrapped `Entry`'s mutex is held; releases the
/// mutex on drop.
struct EntryGuard<'a, M: Manager, O, const ARITY: usize>(&'a Entry<M, O, ARITY>);
85
// SAFETY: an `Entry` only ever contains operator values (`O: Send`) and edges
// (`M::Edge: Send`). All interior mutability goes through `mutex`-guarded
// `UnsafeCell`s, so moving or sharing entries across threads is sound.
// NOTE(review): `Sync` with only `Send` bounds relies on *every* access being
// mutex-guarded — confirm no unguarded reads exist outside this module.
unsafe impl<M: Manager, O: Send, const ARITY: usize> Send for Entry<M, O, ARITY> where M::Edge: Send {}
unsafe impl<M: Manager, O: Send, const ARITY: usize> Sync for Entry<M, O, ARITY> where M::Edge: Send {}
89
90impl<M: Manager, O: Copy + Eq, const ARITY: usize> Entry<M, O, ARITY> {
91 #[allow(clippy::declare_interior_mutable_const)]
94 const INIT: Self = Self {
95 mutex: crate::util::RawMutex::INIT,
96 operator: UnsafeCell::new(MaybeUninit::uninit()),
97 edge_operands: UnsafeCell::new(0),
98 numeric_operands: UnsafeCell::new(0),
99 operands: UnsafeCell::new([Operand::UNINIT; ARITY]),
100 value: UnsafeCell::new(MaybeUninit::uninit()),
101 };
102
103 #[inline]
104 fn lock(&self) -> EntryGuard<'_, M, O, ARITY> {
105 self.mutex.lock();
106 EntryGuard(self)
107 }
108
109 #[inline]
110 fn try_lock(&self) -> Option<EntryGuard<'_, M, O, ARITY>> {
111 if self.mutex.try_lock() {
112 Some(EntryGuard(self))
113 } else {
114 None
115 }
116 }
117}
118
impl<M: Manager, O, const ARITY: usize> Drop for EntryGuard<'_, M, O, ARITY> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: the existence of this guard proves we hold the lock
        unsafe { self.0.mutex.unlock() }
    }
}
126
impl<M: Manager, O, const ARITY: usize> EntryGuard<'_, M, O, ARITY>
where
    O: Copy + Eq,
{
    /// Whether this entry currently holds a cached application.
    /// An operand count of 0 encodes "empty" (see `clear`).
    #[inline]
    fn is_occupied(&self) -> bool {
        // SAFETY: we hold the entry's mutex
        unsafe { *self.0.edge_operands.get() != 0 }
    }

    /// Look up `(operator, operands, numeric_operands)` in this entry and
    /// return a clone of the cached result edge on a hit, `None` on a miss.
    ///
    /// All unsafe reads below are sound because we hold the mutex, and the
    /// initialized fields are only read after the operand counts have been
    /// checked (counts are written *last* in `set`, so non-zero counts imply
    /// `operator`, the first operand slots, and `value` are initialized).
    #[inline]
    fn get(
        &self,
        manager: &M,
        operator: O,
        operands: &[Borrowed<M::Edge>],
        numeric_operands: &[u32],
    ) -> Option<M::Edge> {
        // Callers must supply at least one edge operand (enforced by the
        // `ApplyCache` impl below).
        debug_assert_ne!(operands.len(), 0);

        #[cfg(feature = "statistics")]
        STAT_ACCESSES.fetch_add(1, std::sync::atomic::Ordering::Relaxed);

        let num_edge_operands = operands.len();
        let num_numeric_operands = numeric_operands.len();

        // Mismatching operand counts also cover the empty-slot case:
        // an empty entry has `edge_operands == 0` while `operands` is
        // non-empty.
        if num_edge_operands != unsafe { *self.0.edge_operands.get() } as usize
            || numeric_operands.len() != unsafe { *self.0.numeric_operands.get() } as usize
        {
            return None;
        }
        // SAFETY (here and below): counts matched, so the slot is occupied
        // and `operator`, the operand prefix, and `value` are initialized.
        if operator != unsafe { (*self.0.operator.get()).assume_init() } {
            return None;
        }
        // Layout: edge operands first, then numeric operands (see `set`).
        let (entry_operands, remaining) =
            unsafe { &*self.0.operands.get() }.split_at(num_edge_operands);
        let entry_numeric_operands = &remaining[..num_numeric_operands];

        for (o1, o2) in operands.iter().zip(entry_operands) {
            if &**o1 != unsafe { o2.assume_edge_ref() } {
                return None;
            }
        }
        for (o1, o2) in numeric_operands.iter().zip(entry_numeric_operands) {
            if *o1 != unsafe { o2.assume_numeric() } {
                return None;
            }
        }

        #[cfg(feature = "statistics")]
        STAT_HITS.fetch_add(1, std::sync::atomic::Ordering::Relaxed);

        // Return an owned clone; the cached edge itself stays in the entry.
        Some(manager.clone_edge(unsafe { (*self.0.value.get()).assume_init_ref() }))
    }

    /// Overwrite this entry with a new cached application.
    ///
    /// The previous occupant (if any) is discarded via `clear`, which only
    /// resets the count — stored edges are never dropped here. The operand
    /// counts are written *last* so that the occupancy flag only becomes
    /// valid once all other fields are initialized.
    #[inline]
    fn set(
        &mut self,
        _manager: &M,
        operator: O,
        operands: &[Borrowed<M::Edge>],
        numeric_operands: &[u32],
        value: Borrowed<M::Edge>,
    ) {
        debug_assert_ne!(operands.len(), 0);
        // `ARITY` bounds the total operand count; both counts must fit.
        debug_assert!(operands.len() + numeric_operands.len() <= ARITY);
        self.clear();

        #[cfg(feature = "statistics")]
        STAT_INSERTIONS.fetch_add(1, std::sync::atomic::Ordering::Relaxed);

        let num_edge_operands = operands.len();
        let num_numeric_operands = numeric_operands.len();

        // SAFETY for all cell accesses below: we hold the entry's mutex.
        unsafe { &mut *self.0.operator.get() }.write(operator);
        // Edge operands occupy the array prefix, numerics follow directly.
        let (entry_operands, remaining) =
            unsafe { &mut *self.0.operands.get() }.split_at_mut(num_edge_operands);
        let entry_numeric_operands = &mut remaining[..num_numeric_operands];

        for (src, dst) in operands.iter().zip(entry_operands) {
            dst.write_edge(src.borrowed());
        }
        for (src, dst) in numeric_operands.iter().zip(entry_numeric_operands) {
            dst.write_numeric(*src);
        }

        // Store the result edge without taking over a reference count
        // (`Borrowed::into_inner` hands us a `ManuallyDrop<M::Edge>`).
        let value = unsafe { Borrowed::into_inner(value) };
        unsafe { &mut *self.0.value.get() }.write(ManuallyDrop::into_inner(value));
        // Publish the entry: written last, after all fields are initialized.
        unsafe { *self.0.edge_operands.get() = num_edge_operands as u8 };
        unsafe { *self.0.numeric_operands.get() = num_numeric_operands as u8 };
    }

    /// Mark the entry as empty. Only the occupancy count is reset; the
    /// previously stored operator, operands, and value are left in place
    /// (and are never read again until overwritten by `set`).
    #[inline(always)]
    fn clear(&mut self) {
        // SAFETY: we hold the entry's mutex
        unsafe { *self.0.edge_operands.get() = 0 };
    }
}
253
254impl<M, O, H, const ARITY: usize> DMApplyCache<M, O, H, ARITY>
255where
256 M: Manager,
257 O: Copy + Eq + Hash,
258 H: Hasher + Default,
259{
260 const CHECK_ARITY: () = {
261 assert!(
262 0 < ARITY && ARITY <= u8::MAX as usize,
263 "ARITY must be in range [1, 255]"
264 );
265 };
266
267 pub unsafe fn with_capacity(capacity: usize) -> Self {
276 let () = Self::CHECK_ARITY;
277 let buckets = capacity
278 .checked_next_power_of_two()
279 .expect("capacity is too large");
280 #[cfg(not(feature = "hugealloc"))]
281 let mut vec = Vec::with_capacity(buckets);
282 #[cfg(feature = "hugealloc")]
283 let mut vec = Vec::with_capacity_in(buckets, hugealloc::HugeAlloc);
284
285 vec.resize_with(buckets, || Entry::INIT);
286 DMApplyCache(vec.into_boxed_slice(), PhantomData)
287 }
288
289 #[inline]
291 fn bucket(&self, operator: O, operands: &[Borrowed<M::Edge>]) -> &Entry<M, O, ARITY> {
292 let mut hasher = H::default();
293 operator.hash(&mut hasher);
294 for o in operands {
295 o.hash(&mut hasher);
296 }
297 let mask = (self.0.len() - 1) as u64; let index = (hasher.finish() & mask) as usize;
299 debug_assert!(index < self.0.len());
300 unsafe { self.0.get_unchecked(index) }
302 }
303}
304
// Global (process-wide, not per-cache) counters; reported and reset to zero
// by `print_stats` below.
#[cfg(feature = "statistics")]
static STAT_ACCESSES: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);

#[cfg(feature = "statistics")]
static STAT_HITS: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);

#[cfg(feature = "statistics")]
static STAT_INSERTIONS: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);
313
314impl<M, O, H, const ARITY: usize> crate::StatisticsGenerator for DMApplyCache<M, O, H, ARITY>
315where
316 M: Manager,
317 O: Copy + Eq,
318{
319 #[cfg(not(feature = "statistics"))]
320 fn print_stats(&self) {}
321
322 #[cfg(feature = "statistics")]
323 fn print_stats(&self) {
324 let count = self.0.len();
325 debug_assert!(count > 0);
326 let occupied = self.0.iter().filter(|e| e.lock().is_occupied()).count();
327
328 let accesses = STAT_ACCESSES.swap(0, std::sync::atomic::Ordering::Relaxed);
329 let hits = STAT_HITS.swap(0, std::sync::atomic::Ordering::Relaxed);
330 let insertions = STAT_INSERTIONS.swap(0, std::sync::atomic::Ordering::Relaxed);
331 println!(
332 "[DMApplyCache] fill level: {:.2} %, accesses: {accesses}, hits: {hits}, insertions: {insertions}, hit ratio ~{:.2} %",
333 100.0 * occupied as f32 / count as f32,
334 100.0 * hits as f32 / accesses as f32,
335 );
336 }
337}
338
339impl<M, O, H, const ARITY: usize> ApplyCache<M, O> for DMApplyCache<M, O, H, ARITY>
340where
341 M: Manager,
342 O: Copy + Hash + Ord,
343 H: Hasher + Default,
344{
345 #[inline(always)]
346 fn get_with_numeric(
347 &self,
348 manager: &M,
349 operator: O,
350 operands: &[Borrowed<M::Edge>],
351 numeric_operands: &[u32],
352 ) -> Option<M::Edge> {
353 if operands.is_empty() || operands.len() + numeric_operands.len() > ARITY {
354 return None;
355 }
356 self.bucket(operator, operands).try_lock()?.get(
357 manager,
358 operator,
359 operands,
360 numeric_operands,
361 )
362 }
363
364 #[inline(always)]
365 fn add_with_numeric(
366 &self,
367 manager: &M,
368 operator: O,
369 operands: &[Borrowed<M::Edge>],
370 numeric_operands: &[u32],
371 value: Borrowed<M::Edge>,
372 ) {
373 if operands.is_empty() || operands.len() + numeric_operands.len() > ARITY {
374 return;
375 }
376 if let Some(mut entry) = self.bucket(operator, operands).try_lock() {
377 entry.set(manager, operator, operands, numeric_operands, value);
378 }
379 }
380
381 fn clear(&self, _manager: &M) {
382 for entry in &*self.0 {
383 entry.lock().clear();
384 }
385 }
386}
387
impl<M, O, H, const ARITY: usize> ManagerEventSubscriber<M> for DMApplyCache<M, O, H, ARITY>
where
    M: Manager,
    O: Copy + Hash + Ord,
    H: Hasher + Default,
{
    /// Clear the entire cache before garbage collection and keep every
    /// entry's mutex locked for the duration of the GC.
    ///
    /// The guards are deliberately leaked with `mem::forget`, skipping the
    /// `Drop` impl that would unlock them; `post_gc` releases the locks
    /// again. Presumably this prevents workers from inserting or reading
    /// edges that the GC may invalidate — confirm against the
    /// `ManagerEventSubscriber` contract.
    fn pre_gc(&self, _manager: &M) {
        for entry in &*self.0 {
            let mut entry = entry.lock();
            entry.clear();
            // Leak the guard: the mutex stays locked until `post_gc`.
            std::mem::forget(entry);
        }
    }

    /// Release the locks acquired (and leaked) by `pre_gc`.
    ///
    /// # Safety
    ///
    /// Must only be called after a matching `pre_gc`; unlocking a mutex that
    /// is not held would violate the `RawMutex` contract.
    unsafe fn post_gc(&self, _manager: &M) {
        for entry in &*self.0 {
            // SAFETY: `pre_gc` left this mutex locked
            unsafe { entry.mutex.unlock() };
        }
    }
}
414
415impl<M, O, H, const ARITY: usize> fmt::Debug for DMApplyCache<M, O, H, ARITY>
416where
417 M: Manager,
418 M::Edge: fmt::Debug,
419 O: Copy + Eq + fmt::Debug,
420{
421 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
422 f.debug_map()
423 .entries(
424 self.0
425 .iter()
426 .enumerate()
427 .map(|(i, e)| (i, e.lock()))
428 .filter(|(_, e)| e.is_occupied()),
429 )
430 .finish()
431 }
432}
433
impl<M, O, const ARITY: usize> fmt::Debug for EntryGuard<'_, M, O, ARITY>
where
    M: Manager,
    M::Edge: fmt::Debug,
    O: Copy + Eq + fmt::Debug,
{
    /// Format the guarded entry as `None` (empty) or
    /// `{{ operator(edges…) = value }}`.
    ///
    /// NOTE(review): numeric operands are not included in the output — only
    /// the edge operands are printed.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // SAFETY: the guard proves we hold the entry's mutex.
        let edge_operands = unsafe { *self.0.edge_operands.get() } as usize;
        if edge_operands == 0 {
            // An edge-operand count of 0 marks an empty slot.
            write!(f, "None")
        } else {
            // SAFETY (all reads below): the slot is occupied, so `operator`,
            // the first `edge_operands` operand slots, and `value` are
            // initialized (`set` writes the counts last).
            let operator = unsafe { (*self.0.operator.get()).assume_init() };
            let operands = unsafe { &(&*self.0.operands.get())[..edge_operands] };
            write!(f, "{{{{ {operator:?}({:?}", unsafe {
                operands[0].assume_edge_ref()
            })?;
            for operand in &operands[1..] {
                write!(f, ", {:?}", unsafe { operand.assume_edge_ref() })?;
            }
            let value = unsafe { (*self.0.value.get()).assume_init_ref() };
            write!(f, ") = {value:?} }}}}")
        }
    }
}