lamellar/lamellar_alloc.rs

use crate::env_var::config;

use core::marker::PhantomData;
use indexmap::IndexSet;
// use log::trace;
use parking_lot::{Condvar, Mutex};
use std::collections::BTreeMap;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

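/// Common interface shared by the allocators in this module. Implementations hand out
/// offsets/addresses within a pre-registered region: `init` supplies the base address and
/// size (in bytes), `malloc` loops until space is available, `try_malloc` returns `None`
/// when it is not, `fake_malloc` only checks whether an allocation would succeed, and
/// `free` returns `Err(addr)` if the address was not currently allocated.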
pub(crate) trait LamellarAlloc {
    fn new(id: String) -> Self;
    fn init(&mut self, start_addr: usize, size: usize); //size in bytes
    #[allow(dead_code)]
    fn malloc(&self, size: usize, align: usize) -> usize;
    fn try_malloc(&self, size: usize, align: usize) -> Option<usize>;
    fn fake_malloc(&self, size: usize, align: usize) -> bool;
    fn free(&self, addr: usize) -> Result<(), usize>;
    fn space_avail(&self) -> usize;
    fn occupied(&self) -> usize;
}

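/// A single allocation tracked by `LinearAlloc`: the unpadded base address, the padding
/// inserted to satisfy alignment, and the requested size in bytes.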
#[derive(Debug)]
#[allow(dead_code)]
struct Vma {
    addr: usize,
    padding: usize,
    size: usize,
}

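/// First-fit allocator that keeps its live allocations in an address-ordered `Vec<Vma>`
/// guarded by a mutex/condvar pair. Each `try_malloc` scans the list for the first gap large
/// enough to hold the request plus alignment padding; `free` removes the entry and notifies
/// waiters. Remaining capacity is tracked in `free_space` for `space_avail`/`occupied`.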
#[derive(Clone)]
#[allow(dead_code)]
pub(crate) struct LinearAlloc {
    entries: Arc<(Mutex<Vec<Vma>>, Condvar)>,
    start_addr: usize,
    max_size: usize,
    last_idx: Arc<AtomicUsize>,
    _id: String,
    free_space: Arc<AtomicUsize>,
}

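/// Number of bytes needed to round `addr` up to the next multiple of `align`,
/// e.g. `calc_padding(0x1003, 8) == 5` and `calc_padding(0x1000, 8) == 0`.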
fn calc_padding(addr: usize, align: usize) -> usize {
    let rem = addr % align;
    if rem == 0 {
        0
    } else {
        align - rem
    }
}

impl LamellarAlloc for LinearAlloc {
    fn new(id: String) -> LinearAlloc {
        // trace!("new linear alloc: {:?}", id);
        LinearAlloc {
            entries: Arc::new((Mutex::new(Vec::new()), Condvar::new())),
            start_addr: 0,
            max_size: 0,
            last_idx: Arc::new(AtomicUsize::new(0)),
            _id: id,
            free_space: Arc::new(AtomicUsize::new(0)),
        }
    }

    fn init(&mut self, start_addr: usize, size: usize) {
        // trace!("init: {:?} {:x} {:?}", self.id, start_addr, size);
        self.start_addr = start_addr;
        self.max_size = size;
        self.free_space.store(size, Ordering::SeqCst);
    }

    fn malloc(&self, size: usize, align: usize) -> usize {
        let mut val = self.try_malloc(size, align);
        while val.is_none() {
            val = self.try_malloc(size, align);
        }
        val.unwrap()
    }

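    // Scan the address-ordered entries for the first gap that can hold `size` bytes at the
    // requested alignment; if nothing fits within the region, wait briefly on the condvar
    // (so a concurrent `free` can wake us) and return `None`.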
    fn try_malloc(&self, size: usize, align: usize) -> Option<usize> {
        let &(ref lock, ref cvar) = &*self.entries;
        let mut entries = lock.lock();

        if entries.len() > 0 {
            // let padding = align - (self.start_addr % align);
            // let mut prev_end = self.start_addr + padding;
            let mut prev_end = self.start_addr;
            let mut padding = calc_padding(prev_end, align);
            let mut idx = 0;
            for i in 0..entries.len() {
                if entries[i].addr - prev_end >= size + padding {
                    break;
                }
                prev_end = entries[i].addr + entries[i].size + entries[i].padding;
                padding = calc_padding(prev_end, align);
                idx = i + 1;
            }

            if prev_end + size + padding <= self.start_addr + self.max_size {
                let n_vma = Vma {
                    addr: prev_end,
                    padding,
                    size,
                };
                entries.insert(idx, n_vma);
                self.free_space.fetch_sub(size + padding, Ordering::SeqCst);
                return Some(prev_end + padding);
            } else {
                cvar.wait_for(&mut entries, std::time::Duration::from_millis(1));
                return None;
            }
        } else {
            let padding = calc_padding(self.start_addr, align);
            if self.start_addr + size + padding <= self.start_addr + self.max_size {
                let n_vma = Vma {
                    addr: self.start_addr,
                    padding,
                    size,
                };
                entries.push(n_vma);
                self.free_space.fetch_sub(size + padding, Ordering::SeqCst);
                return Some(self.start_addr + padding);
            } else {
                cvar.wait_for(&mut entries, std::time::Duration::from_millis(1));
                return None;
            }
        }
    }

    fn fake_malloc(&self, size: usize, align: usize) -> bool {
        let &(ref lock, ref _cvar) = &*self.entries;
        let entries = lock.lock();

        if entries.len() > 0 {
            let mut prev_end = self.start_addr;
            let mut padding = calc_padding(prev_end, align);
            for i in 0..entries.len() {
                if entries[i].addr - prev_end >= size + padding {
                    break;
                }
                prev_end = entries[i].addr + entries[i].size + entries[i].padding;
                padding = calc_padding(prev_end, align);
            }

            prev_end + size + padding <= self.start_addr + self.max_size
        } else {
            let padding = calc_padding(self.start_addr, align);
            self.start_addr + size + padding <= self.start_addr + self.max_size
        }
    }

    fn free(&self, addr: usize) -> Result<(), usize> {
        let &(ref lock, ref cvar) = &*self.entries;
        let mut entries = lock.lock();
        for i in 0..entries.len() {
            if addr - entries[i].padding == entries[i].addr {
                self.free_space
                    .fetch_add(entries[i].size + entries[i].padding, Ordering::SeqCst);
                entries.remove(i);
                let last_idx = self.last_idx.load(Ordering::SeqCst);
                if last_idx >= entries.len() && last_idx != 0 {
                    self.last_idx.fetch_sub(1, Ordering::SeqCst);
                }
                cvar.notify_all();

                return Ok(());
            }
        }
        Err(addr)
    }
    fn space_avail(&self) -> usize {
        self.free_space.load(Ordering::SeqCst)
    }
    fn occupied(&self) -> usize {
        self.max_size - self.free_space.load(Ordering::SeqCst)
    }
}

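/// Free list used by `BTreeAlloc`, indexed two ways: `sizes` maps a block size to the set of
/// free addresses of that size (for best-fit lookup via `range`), while `addrs` maps each free
/// address to its `(size, padding)` (for coalescing neighbours). The two maps must be kept in
/// sync by every operation that adds or removes a free block.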
#[derive(Clone, Debug)]
struct FreeEntries {
    sizes: BTreeMap<usize, IndexSet<usize>>, //<size, IndexSet<addr>>
    addrs: BTreeMap<usize, (usize, usize)>,  //<addr,(size,padding)>
}

impl FreeEntries {
    fn new() -> FreeEntries {
        FreeEntries {
            sizes: BTreeMap::new(),
            addrs: BTreeMap::new(),
        }
    }

    // Coalesce adjacent free blocks: walk the address-ordered map from the front, folding
    // each block into its successor whenever the two are contiguous, keeping `sizes` in sync.
    fn merge(&mut self) {
        let mut merged: BTreeMap<usize, (usize, usize)> = BTreeMap::new();
        let mut cur = self.addrs.pop_first();
        while let Some((faddr, (fsize, fpadding))) = cur {
            match self.addrs.pop_first() {
                Some((naddr, (nsize, npadding))) if faddr + fsize + fpadding == naddr => {
                    //merge: fold the next block into the current one and keep scanning
                    let new_size = fsize + nsize;
                    let new_padding = fpadding + npadding;
                    assert!(new_padding == 0);
                    self.remove_size(naddr, nsize);
                    self.remove_size(faddr, fsize);
                    self.sizes
                        .entry(new_size)
                        .or_insert(IndexSet::new())
                        .insert(faddr);
                    cur = Some((faddr, (new_size, new_padding)));
                }
                next => {
                    //not contiguous (or no successor): this block is final
                    merged.insert(faddr, (fsize, fpadding));
                    cur = next;
                }
            }
        }
        self.addrs = merged;
    }

    fn remove_size(&mut self, addr: usize, size: usize) {
        let mut remove_size = false;
        if let Some(addrs) = self.sizes.get_mut(&size) {
            addrs.remove(&addr);
            if addrs.is_empty() {
                remove_size = true;
            }
        }
        if remove_size {
            self.sizes.remove(&size);
        }
    }
}

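/// Best-fit allocator backed by the `FreeEntries` maps above. `try_malloc` picks the smallest
/// free block that can satisfy the request (including worst-case alignment padding), splits
/// off any remainder, and records the allocation in `allocated_addrs`; `free` returns the
/// block to the free list and coalesces it with adjacent free blocks.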
#[derive(Clone, Debug)]
pub(crate) struct BTreeAlloc {
    free_entries: Arc<(Mutex<FreeEntries>, Condvar)>,
    allocated_addrs: Arc<(Mutex<BTreeMap<usize, (usize, usize)>>, Condvar)>, //<addr,(size,padding)>
    pub(crate) start_addr: usize,
    pub(crate) max_size: usize,
    id: String,
    free_space: Arc<AtomicUsize>,
}

impl BTreeAlloc {}

impl LamellarAlloc for BTreeAlloc {
    fn new(id: String) -> BTreeAlloc {
        // trace!("new BTreeAlloc: {:?}", id);
        BTreeAlloc {
            free_entries: Arc::new((Mutex::new(FreeEntries::new()), Condvar::new())),
            allocated_addrs: Arc::new((Mutex::new(BTreeMap::new()), Condvar::new())),
            start_addr: 0,
            max_size: 0,
            id,
            free_space: Arc::new(AtomicUsize::new(0)),
        }
    }
    fn init(&mut self, start_addr: usize, size: usize) {
        // println!("init: {:?} {:x} {:?}", self.id, start_addr, size);
        self.start_addr = start_addr;
        self.max_size = size;
        let &(ref lock, ref _cvar) = &*self.free_entries;
        let mut free_entries = lock.lock();
        let mut temp = IndexSet::new();
        temp.insert(start_addr);
        free_entries.sizes.insert(size, temp);
        free_entries.addrs.insert(start_addr, (size, 0));
        self.free_space.store(size, Ordering::SeqCst);
    }

    fn malloc(&self, size: usize, align: usize) -> usize {
        let mut val = self.try_malloc(size, align);
        let mut timer = std::time::Instant::now();
        while val.is_none() {
            val = self.try_malloc(size, align);
            if timer.elapsed().as_secs_f64() > config().deadlock_timeout {
                println!("[WARNING]  Potential deadlock detected when trying to allocate more memory.\n\
                The deadlock timeout can be set via the LAMELLAR_DEADLOCK_WARNING_TIMEOUT environment variable, the current timeout is {} seconds\n\
                To view backtrace set RUST_LIB_BACKTRACE=1\n\
                {}",config().deadlock_timeout,std::backtrace::Backtrace::capture());
                timer = std::time::Instant::now();
            }
        }
        val.unwrap()
    }

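    // Best-fit search: `upper_size = size + align - 1` is the worst-case space the request
    // could need once alignment padding is added, so any free block of at least that size is
    // guaranteed to fit. Pick the smallest such block, carve the allocation (plus its actual
    // padding) off the front, and return any leftover to the free list.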
    fn try_malloc(&self, size: usize, align: usize) -> Option<usize> {
        let &(ref lock, ref cvar) = &*self.free_entries;
        let mut free_entries = lock.lock();
        // println!("before: {:?}", free_entries);
        let mut addr: Option<usize> = None;
        let mut remove_size: Option<usize> = None;
        let upper_size = size + align - 1; //max size we would need

        let mut try_again = true;

        while try_again {
            //find the smallest free block at least upper_size bytes long
            if let Some((free_size, addrs)) = free_entries.sizes.range_mut(upper_size..).next() {
                //the actual size, and the set of addrs with that size
                addr = addrs.pop(); //take an address with this size (IndexSet::pop removes the last entry)

                //if no more addresses exist with this size, mark it as removable from the free_entries size map
                if addrs.is_empty() {
                    remove_size = Some(free_size.clone());
                }

                if let Some(a) = addr {
                    let padding = calc_padding(a, align);
                    let full_size = size + padding;

                    if let Some((fsize, fpadding)) = free_entries.addrs.remove(&a) {
                        // remove the address from free_entries address map
                        // if full_size > fsize {
                        //     println!("{full_size} {fsize} {size} {align} {padding}  {fpadding}");
                        // }
                        if fsize + fpadding != full_size {
                            let remaining = (fsize + fpadding) - full_size;
                            let new_addr = a + full_size;
                            free_entries
                                .sizes
                                .entry(remaining)
                                .or_insert(IndexSet::new()) //insert new entry of remaining size if not already present
                                .insert(new_addr); //insert address into size map
                            free_entries.addrs.insert(new_addr, (remaining, 0));
                        }
                    } else {
                        // println!("{full_size}  {size} {align} {padding}");
                        // println!("{:?}", free_entries.sizes);
                        // println!("{:?}", free_entries.addrs);
                        panic!("{:?} addr {:?} not found in free_entries", self.id, a);
                    }
                }
                try_again = false;
            } else {
                cvar.wait_for(&mut free_entries, std::time::Duration::from_millis(1));
                free_entries.merge();
                if free_entries.sizes.range_mut(upper_size..).next().is_none() {
                    try_again = false;
                }
            }
        }

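        // Finish the bookkeeping: drop the now-empty size bucket (if any), release the free
        // list lock, then record the padded address in allocated_addrs and decrement
        // free_space by the full (padded) size so free() can undo it later.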
        if let Some(rsize) = remove_size {
            free_entries.sizes.remove(&rsize);
        }
        // println!("after: free_entries: {:?}", free_entries);
        drop(free_entries);
        addr = if let Some(a) = addr {
            let padding = calc_padding(a, align);
            let full_size = size + padding;
            let &(ref lock, ref _cvar) = &*self.allocated_addrs;
            let mut allocated_addrs = lock.lock();
            allocated_addrs.insert(a + padding, (size, padding));
            // println!("allocated_addrs: {:?}", allocated_addrs);
            self.free_space.fetch_sub(full_size, Ordering::SeqCst);
            // println!(
            //     "alloc addr {:?} = {a:?} + padding {padding} (size: {size}, align: {align}) {:?}",
            //     a + padding,
            //     self.free_space.load(Ordering::SeqCst)
            // );
            let new_addr = a + padding;
            // let rem = new_addr % align;
            // let rem_16 = new_addr % 16;
            // println!(
            //     "alloc addr {:x?} {:x?} {new_addr} {a} {padding} {rem} {align} {rem_16}",
            //     a + padding,
            //     new_addr,
            // );
            Some(new_addr)
        } else {
            None
        };

        addr
    }

    fn fake_malloc(&self, size: usize, align: usize) -> bool {
        let &(ref lock, ref _cvar) = &*self.free_entries;
        let mut free_entries = lock.lock();
        let upper_size = size + align - 1; //max size we would need
        //find the smallest free block at least upper_size bytes long
        if free_entries.sizes.range_mut(upper_size..).next().is_some() {
            return true;
        } else {
            free_entries.merge();
            if free_entries.sizes.range_mut(upper_size..).next().is_some() {
                return true;
            }
            return false;
        }
    }

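    // Return an allocation to the free list: look up the (size, padding) recorded for `addr`,
    // credit free_space, then coalesce the freed block with the free neighbours immediately
    // before and after it (if contiguous) before reinserting it and waking any waiters.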
    fn free(&self, addr: usize) -> Result<(), usize> {
        let &(ref lock, ref _cvar) = &*self.allocated_addrs;
        let mut allocated_addrs = lock.lock();
        // println!("trying to free: {:x?} {:?}", addr, addr);
        if let Some((size, padding)) = allocated_addrs.remove(&addr) {
            // println!("allocated_addrs: {:?}", allocated_addrs);
            let full_size = size + padding;
            self.free_space.fetch_add(full_size, Ordering::SeqCst);
            drop(allocated_addrs);
            let unpadded_addr = addr - padding;
            let mut temp_addr = unpadded_addr;
            let mut temp_size = full_size;
            let mut remove = Vec::new();
            let &(ref lock, ref cvar) = &*self.free_entries;
            let mut free_entries = lock.lock();
            if let Some((faddr, (fsize, fpadding))) =
                free_entries.addrs.range(..temp_addr).next_back()
            {
                //look at the free block just before the freed block (including its padding)
                if faddr + fsize + fpadding == temp_addr {
                    //they are next to each other...
                    temp_addr = *faddr;
                    temp_size += fsize + fpadding;
                    remove.push((*faddr, *fsize, *fpadding));
                }
                // else {
                //     break;
                // }
            }

            if let Some((faddr, (fsize, fpadding))) = free_entries.addrs.range(addr..).next() {
                //look at the free block just after the freed block
                if temp_addr + temp_size == *faddr {
                    //they are next to each other...
                    temp_size += fsize + fpadding;
                    remove.push((*faddr, *fsize, *fpadding));
                }
                // else {
                //     break;
                // }
            }
            for (raddr, rsize, rpadding) in remove {
                let rfull_size = rsize + rpadding;
                free_entries.addrs.remove(&raddr);
                free_entries.remove_size(raddr, rfull_size);
            }
            free_entries.addrs.insert(temp_addr, (temp_size, 0));
            free_entries
                .sizes
                .entry(temp_size)
                .or_insert(IndexSet::new())
                .insert(temp_addr);

            // free_entries.addrs.insert(unpadded_addr, (full_size, 0));
            // free_entries
            //     .sizes
            //     .entry(full_size)
            //     .or_insert(IndexSet::new())
            //     .insert(unpadded_addr);

            // println!("before: {:?}", free_entries);

            // let mut i = 0;
            // while i < free_entries.addrs.len() - 1 {
            //     let (faddr, (fsize, fpadding)) = free_entries.addrs.pop_first().unwrap();
            //     let (naddr, (nsize, npadding)) = free_entries.addrs.pop_first().unwrap();
            //     if faddr + fsize + fpadding == naddr {
            //         //merge
            //         let new_size = fsize + nsize;
            //         let new_padding = fpadding + npadding;
            //         assert!(new_padding == 0);
            //         let new_addr = faddr;
            //         self.remove_size(&mut free_entries, naddr, nsize);
            //         self.remove_size(&mut free_entries, faddr, fsize);
            //         free_entries.addrs.insert(new_addr, (new_size, new_padding));
            //         free_entries
            //             .sizes
            //             .entry(new_size)
            //             .or_insert(IndexSet::new())
            //             .insert(new_addr);
            //     } else {
            //         free_entries.addrs.insert(faddr, (fsize, fpadding));
            //         free_entries.addrs.insert(naddr, (nsize, npadding));
            //         i += 1;
            //     }
            // }

            // println!("after: free_entries: {:?}", free_entries);
            cvar.notify_all();
            Ok(())
        } else {
            // println!(
            //     "{:?} illegal free, addr not currently allocated: {:?}",
            //     self.id, addr
            // );
            Err(addr)
        }
    }

    fn space_avail(&self) -> usize {
        self.free_space.load(Ordering::SeqCst)
    }
    fn occupied(&self) -> usize {
        self.max_size - self.free_space.load(Ordering::SeqCst)
    }
}

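/// Slab-style allocator for fixed-size objects of type `T`: `init` pre-computes every
/// properly aligned slot address in the region and stores them in a free list, so `malloc`
/// and `free` are a simple pop/push. Only single-object allocations (`size == 1`) are
/// supported.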
#[derive(Clone)]
#[allow(dead_code)]
pub(crate) struct ObjAlloc<T: Copy> {
    free_entries: Arc<(Mutex<Vec<usize>>, Condvar)>,
    start_addr: usize,
    max_size: usize,
    _id: String,
    phantom: PhantomData<T>,
}

impl<T: Copy> LamellarAlloc for ObjAlloc<T> {
    fn new(id: String) -> ObjAlloc<T> {
        // trace!("new ObjAlloc: {:?}", id);
        ObjAlloc {
            free_entries: Arc::new((Mutex::new(Vec::new()), Condvar::new())),
            start_addr: 0,
            max_size: 0,
            _id: id,
            phantom: PhantomData,
        }
    }
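    // Pad the base address up to T's alignment, then enumerate one slot address per
    // size_of::<T>() bytes across the region; these become the initial free list.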
    fn init(&mut self, start_addr: usize, size: usize) {
        // trace!("init: {:?} {:x} {:?}", self.id, start_addr, size);
        let align = std::mem::align_of::<T>();
        let padding = calc_padding(start_addr, align);
        self.start_addr = start_addr + padding;
        self.max_size = size;
        let &(ref lock, ref _cvar) = &*self.free_entries;
        let mut free_entries = lock.lock();
        *free_entries = ((start_addr + padding)..(start_addr + size))
            .step_by(std::mem::size_of::<T>())
            .collect();
    }

    fn malloc(&self, size: usize, align: usize) -> usize {
        let mut val = self.try_malloc(size, align);
        while val.is_none() {
            val = self.try_malloc(size, align);
        }
        val.unwrap()
    }

    fn try_malloc(&self, size: usize, _align: usize) -> Option<usize> {
        //don't need to worry about alignment here as we handle padding in init
        assert_eq!(
            size, 1,
            "ObjAlloc does not currently support multi-object allocations"
        );
        let &(ref lock, ref cvar) = &*self.free_entries;
        let mut free_entries = lock.lock();
        if let Some(addr) = free_entries.pop() {
            return Some(addr);
        } else {
            cvar.wait_for(&mut free_entries, std::time::Duration::from_millis(1));
            return None;
        }
    }

    fn fake_malloc(&self, size: usize, _align: usize) -> bool {
        //don't need to worry about alignment here as we handle padding in init
        assert_eq!(
            size, 1,
            "ObjAlloc does not currently support multi-object allocations"
        );
        let &(ref lock, ref _cvar) = &*self.free_entries;
        let free_entries = lock.lock();
        free_entries.len() > 1
    }

    fn free(&self, addr: usize) -> Result<(), usize> {
        let &(ref lock, ref cvar) = &*self.free_entries;
        let mut free_entries = lock.lock();
        free_entries.push(addr);
        cvar.notify_all();
        Ok(())
    }

    fn space_avail(&self) -> usize {
        let &(ref lock, ref _cvar) = &*self.free_entries;
        let free_entries = lock.lock();
        free_entries.len()
    }
    fn occupied(&self) -> usize {
        self.max_size - self.space_avail()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use rand::seq::SliceRandom;
    use rand::{rngs::StdRng, Rng, SeedableRng};

    fn test_malloc(alloc: &mut impl LamellarAlloc) {
        alloc.init(0, 100);
        let mut rng = StdRng::seed_from_u64(0);
        let mut shuffled: Vec<usize> = (1..11).collect();
        shuffled.shuffle(&mut rng);

        let mut cnt = 0;
        for i in shuffled {
            assert_eq!(alloc.malloc(i, 1), cnt);
            cnt += i;
        }
        for i in (cnt..100).step_by(5) {
            assert_eq!(alloc.malloc(5, 1), i);
        }
    }

    fn stress<T: LamellarAlloc + Clone + Send + 'static>(alloc: T) {
        let mut threads = Vec::new();
        let start = std::time::Instant::now();
        for _i in 0..10 {
            let alloc_clone = alloc.clone();
            let t = std::thread::spawn(move || {
                let mut rng = rand::thread_rng();
                let mut addrs: Vec<usize> = Vec::new();
                let mut i = 0;
                while i < 100000 {
                    if rng.gen_range(0..2) == 0 || addrs.is_empty() {
                        if let Some(addr) = alloc_clone.try_malloc(1, 1) {
                            addrs.push(addr);
                            i += 1;
                        }
                    } else {
                        let index = rng.gen_range(0..addrs.len());
                        let addr = addrs.remove(index);
                        alloc_clone
                            .free(addr)
                            .expect("Address should have been found and freed");
                    }
                }
                for addr in addrs {
                    alloc_clone
                        .free(addr)
                        .expect("Address should have been found and freed");
                }
            });
            threads.push(t);
        }
        for t in threads {
            t.join().unwrap();
        }
        let time = start.elapsed().as_secs_f64();

        println!("time: {:?}", time);
    }

    #[test]
    fn test_linear_malloc() {
        let mut alloc = LinearAlloc::new("linear_malloc".to_string());
        test_malloc(&mut alloc);
    }

    #[test]
    fn test_linear_stress() {
        let mut alloc = LinearAlloc::new("linear_stress".to_string());
        alloc.init(0, 100000);
        stress(alloc.clone());
        let &(ref lock, ref _cvar) = &*alloc.entries;
        let entries = lock.lock();
        assert_eq!(entries.len(), 0);
    }

    #[test]
    fn test_btreealloc_malloc() {
        let mut alloc = BTreeAlloc::new("btree_malloc".to_string());
        test_malloc(&mut alloc);
    }

    #[test]
    fn test_btreealloc_stress() {
        let mut alloc = BTreeAlloc::new("btree_stress".to_string());
        alloc.init(0, 100000);
        stress(alloc.clone());
        let &(ref lock, ref _cvar) = &*alloc.free_entries;
        let free_entries = lock.lock();
        assert_eq!(free_entries.sizes.len(), 1);
        assert_eq!(free_entries.addrs.len(), 1);
        // assert_eq!(free_entries.addrs.get(&0), Some(&100000usize));
        let &(ref lock, ref _cvar) = &*alloc.allocated_addrs;
        let allocated_addrs = lock.lock();
        assert_eq!(allocated_addrs.len(), 0);
    }

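    // A small illustrative check of BTreeAlloc's alignment handling (the test name, region
    // base, and sizes here are illustrative, not part of the original suite): malloc pads the
    // returned address up to the requested alignment, and free re-coalesces the block so the
    // region reports as empty again.
    #[test]
    fn test_btreealloc_alignment() {
        let mut alloc = BTreeAlloc::new("btree_align".to_string());
        //start the region at an unaligned base address so padding is actually exercised
        alloc.init(1, 1024);
        let addr = alloc.malloc(10, 8);
        assert_eq!(addr % 8, 0); //returned address honors the requested alignment
        assert!(addr >= 1);
        alloc.free(addr).expect("addr should have been allocated");
        assert_eq!(alloc.occupied(), 0); //freed block coalesces back into one region
        assert_eq!(alloc.space_avail(), 1024);
    }
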
    #[test]
    fn test_obj_malloc() {
        let mut alloc = ObjAlloc::<u8>::new("obj_malloc_u8".to_string());
        alloc.init(0, 10);
        for i in 0..10 {
            assert_eq!(alloc.malloc(1, 1), 9 - i); //object allocator returns last address first...
        }

        let mut alloc = ObjAlloc::<u16>::new("obj_malloc_u16".to_string());
        alloc.init(0, 10); //only 5 slots
        for i in 0..5 {
            assert_eq!(alloc.malloc(1, 1), 8 - (i * std::mem::size_of::<u16>()));
        }
        assert_eq!(alloc.try_malloc(1, 1), None);
    }

    #[test]
    fn test_obj_u8_stress() {
        let mut alloc = ObjAlloc::<u8>::new("obj_malloc_u8".to_string());
        alloc.init(0, 100000);
        stress(alloc.clone());
        let &(ref lock, ref _cvar) = &*alloc.free_entries;
        let free_entries = lock.lock();
        assert_eq!(free_entries.len(), 100000 / std::mem::size_of::<u8>());
    }
    #[test]
    fn test_obj_f64_stress() {
        let mut alloc = ObjAlloc::<f64>::new("obj_malloc_f64".to_string());
        alloc.init(0, 100000);
        stress(alloc.clone());
        let &(ref lock, ref _cvar) = &*alloc.free_entries;
        let free_entries = lock.lock();
        assert_eq!(free_entries.len(), 100000 / std::mem::size_of::<f64>());
    }
}
731}