1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
#![no_std]
use umm_malloc_sys as ffi;
/// Hands the memory region `[start_addr, start_addr + size)` to umm_malloc
/// to use as its heap.
///
/// # Safety
///
/// The region must be valid, writable memory that nothing else uses for the
/// lifetime of the program, and this must be called before any allocation is
/// attempted through the global allocator. `start_addr` should satisfy the
/// allocator's alignment assumptions (presumably `MIN_ALIGN` — confirm
/// against the C library's configuration).
#[inline]
pub unsafe fn init(start_addr: usize, size: usize) {
    // Pointer type is inferred from `umm_init`'s FFI signature.
    let heap_start = start_addr as *mut _;
    ffi::umm_init(heap_start, size)
}
// Zero-sized handle for the umm heap; all allocator state lives in the C library.
struct UmmHeap {}
// Install umm_malloc as the program's global allocator.
#[global_allocator]
static ALLOCATOR: UmmHeap = UmmHeap {};
// Maximum alignment this allocator can serve; requests above this are rejected
// by the `GlobalAlloc` impl. Assumed to match umm_malloc's block alignment —
// TODO confirm against the C library's configuration.
pub const MIN_ALIGN: usize = 8;
/// `GlobalAlloc` backed by the C `umm_malloc` library.
///
/// umm_malloc only provides `MIN_ALIGN`-byte alignment, so requests with a
/// stricter alignment are reported as allocation failure (null) rather than
/// panicking: a `GlobalAlloc` implementation must not unwind, and a null
/// return lets the runtime route the failure through `handle_alloc_error`.
unsafe impl core::alloc::GlobalAlloc for UmmHeap {
    /// Allocates `layout.size()` bytes with `umm_malloc`.
    #[inline]
    unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 {
        if layout.align() <= MIN_ALIGN {
            ffi::umm_malloc(layout.size()).cast()
        } else {
            // Unsupported alignment: signal failure instead of panicking
            // (the previous `unimplemented!()` violated the no-unwind
            // requirement of the GlobalAlloc contract).
            core::ptr::null_mut()
        }
    }

    /// Returns `ptr` to the umm heap via `umm_free`.
    #[inline]
    unsafe fn dealloc(&self, ptr: *mut u8, _layout: core::alloc::Layout) {
        ffi::umm_free(ptr.cast());
    }

    /// Resizes the allocation at `ptr` to `new_size` bytes via `umm_realloc`.
    ///
    /// As with `alloc`, over-aligned requests return null; per the
    /// `GlobalAlloc` contract the original allocation remains valid and
    /// untouched in that case.
    #[inline]
    unsafe fn realloc(
        &self,
        ptr: *mut u8,
        layout: core::alloc::Layout,
        new_size: usize,
    ) -> *mut u8 {
        if layout.align() <= MIN_ALIGN {
            ffi::umm_realloc(ptr.cast(), new_size).cast()
        } else {
            core::ptr::null_mut()
        }
    }
}
#[cfg(all(feature = "sync", feature = "cortex-m"))]
mod cortex_m_sync {
//! Critical-section hooks called by the C umm_malloc library (via the
//! `_umm_critical_entry`/`_umm_critical_exit` symbols) to guard heap state
//! against interrupt-driven reentrancy on Cortex-M targets.
use core::sync::atomic::{AtomicBool, Ordering};
// Records whether interrupts were enabled when the critical section was
// entered, i.e. whether `_umm_critical_exit` must re-enable them.
// NOTE(review): a single flag cannot track nested critical sections — a
// nested entry made with interrupts already disabled overwrites `true` with
// `false`. This assumes umm_malloc never nests entry/exit — confirm against
// the C library.
static NEEDS_ENABLE: AtomicBool = AtomicBool::new(false);
// Disable interrupts, remembering the prior PRIMASK state so `exit` can
// restore it. The order matters: PRIMASK is sampled before disabling so the
// saved state reflects the caller's context.
#[no_mangle]
#[inline]
unsafe extern "C" fn _umm_critical_entry() {
let primask = cortex_m::register::primask::read();
cortex_m::interrupt::disable();
// `is_active()` presumably means "interrupts enabled" per the cortex-m
// crate's Primask convention — verify against the crate docs. The store
// happens after `disable()`, so it cannot be preempted.
NEEDS_ENABLE.store(primask.is_active(), Ordering::SeqCst);
}
// Re-enable interrupts only if they were enabled on entry; `swap(false)`
// both reads and clears the flag in one atomic step.
#[no_mangle]
#[inline]
unsafe extern "C" fn _umm_critical_exit() {
let needs_enable = NEEDS_ENABLE.swap(false, Ordering::SeqCst);
if needs_enable {
cortex_m::interrupt::enable();
}
}
}