buddy_alloc/fast_alloc.rs

pub const BLOCK_SIZE: usize = 64;

pub const DEFAULT_INITIALIZED_NODES: usize = 4;

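/// Node of the intrusive, circular doubly linked free list. Each node is
/// stored in place at the start of a free block.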
struct Node {
    next: *mut Node,
    prev: *mut Node,
}

impl Node {
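    /// Initializes `list` as a circular list containing only itself.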
    fn init(list: *mut Node) {
        unsafe {
            list.write(Node {
                next: list,
                prev: list,
            });
        }
    }

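    /// Unlinks `list` from its neighbours.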
    fn remove(list: *mut Node) {
        unsafe {
            core::ptr::write_volatile(&mut (*(*list).prev).next, (*list).next);
            core::ptr::write_volatile(&mut (*(*list).next).prev, (*list).prev);
        }
    }

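    /// Removes and returns the node following `list`.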
    fn pop(list: *mut Node) -> *mut Node {
        let n_list: *mut Node = unsafe { (*list).next };
        Self::remove(n_list);
        n_list
    }

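    /// Writes a new node into the block at `p` and inserts it right after `list`.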
    fn push(list: *mut Node, p: *mut u8) {
        let p = p.cast::<Node>();
        unsafe {
            let n_list: Node = Node {
                prev: list,
                next: (*list).next,
            };
            p.write(n_list);
            core::ptr::write_volatile(&mut (*(*list).next).prev, p);
            core::ptr::write_volatile(&mut (*list).next, p);
        }
    }

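    /// Returns `true` if `list` is the only node in the list.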
    fn is_empty(list: *const Node) -> bool {
        unsafe { (*list).next as *const Node == list }
    }
}

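/// Construction parameters for [`FastAlloc`]: the memory region to manage and
/// how many free-list nodes to link up front.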
#[derive(Clone, Copy)]
pub struct FastAllocParam {
    base_addr: *const u8,
    len: usize,
    initialized_nodes: usize,
}

impl FastAllocParam {
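    /// Creates parameters with the default number of initialized nodes.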
    pub const fn new(base_addr: *const u8, len: usize) -> Self {
        FastAllocParam {
            base_addr,
            len,
            initialized_nodes: DEFAULT_INITIALIZED_NODES,
        }
    }

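    /// Creates parameters with an explicit number of initialized nodes.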
    pub const fn new_with_initialized_nodes(
        base_addr: *const u8,
        len: usize,
        initialized_nodes: usize,
    ) -> Self {
        FastAllocParam {
            base_addr,
            len,
            initialized_nodes,
        }
    }
}

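/// A fixed-size block allocator: every allocation occupies one `BLOCK_SIZE`
/// block, freed blocks are tracked on an intrusive linked list, and never-used
/// memory past `next_addr` is handed out lazily.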
pub struct FastAlloc {
    // Start address of the managed region.
    base_addr: usize,
    // One past the last managed byte.
    end_addr: usize,
    // Next never-used block to hand out once the free list is empty.
    next_addr: usize,
    // Head of the free list; null when no freed or pre-initialized block is available.
    free: *mut Node,
}

impl FastAlloc {
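    /// Creates an allocator over the region described by `param`.
    ///
    /// # Safety
    ///
    /// The region starting at `param.base_addr` must be valid for reads and
    /// writes for `param.len` bytes, must not be used by anything else, and
    /// must stay alive for as long as the allocator is used.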
    pub unsafe fn new(param: FastAllocParam) -> Self {
        let FastAllocParam {
            base_addr,
            len,
            initialized_nodes,
        } = param;
        let nblocks = len / BLOCK_SIZE;
        debug_assert_eq!(len % BLOCK_SIZE, 0);

        let base_addr = base_addr as usize;
        let end_addr = base_addr + nblocks * BLOCK_SIZE;

        debug_assert_eq!(
            base_addr % BLOCK_SIZE,
            0,
            "base_addr must align to block size"
        );

        // Only link the first `initialized_nodes` blocks now; the rest are
        // handed out lazily through `next_addr`.
        let cblocks = core::cmp::min(nblocks, initialized_nodes);

        // The first block becomes the head of the free list.
        let free = base_addr as *mut Node;
        Node::init(free);

        let mut addr = base_addr;
        for _ in 1..cblocks {
            addr += BLOCK_SIZE;
            Node::push(free, addr as *mut u8);
        }

        FastAlloc {
            base_addr,
            end_addr,
            next_addr: addr + BLOCK_SIZE,
            free,
        }
    }

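    /// Returns `true` if `p` points into the region managed by this allocator.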
    pub fn contains_ptr(&self, p: *mut u8) -> bool {
        let addr = p as usize;
        addr >= self.base_addr && addr < self.end_addr
    }

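    /// Allocates a single block. Returns a null pointer if `nbytes` is larger
    /// than `BLOCK_SIZE` or no block is available.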
    pub fn malloc(&mut self, nbytes: usize) -> *mut u8 {
        if nbytes > BLOCK_SIZE {
            return core::ptr::null_mut();
        }

        if self.free.is_null() {
            // Free list is exhausted: bump-allocate the next never-used block, if any.
            if self.next_addr < self.end_addr {
                let result = self.next_addr;
                self.next_addr += BLOCK_SIZE;
                return result as *mut u8;
            } else {
                return core::ptr::null_mut();
            }
        }

        // Take a block from the free list; clear the head pointer if it was the last one.
        let is_last = Node::is_empty(self.free);
        let p = Node::pop(self.free) as *mut u8;
        if is_last {
            self.free = core::ptr::null_mut();
        }
        p
    }

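    /// Returns the block at `p` to the free list. `p` must have been returned
    /// by `malloc` on this allocator and must not be used after this call.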
    pub fn free(&mut self, p: *mut u8) {
        debug_assert!(self.contains_ptr(p));
        if self.free.is_null() {
            // The freed block becomes the new head of an otherwise empty free list.
            let n = p.cast();
            Node::init(n);
            self.free = n;
        } else {
            Node::push(self.free, p);
        }
    }
}
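
// Usage sketch (not part of the original file), assuming the caller owns a
// BLOCK_SIZE-aligned buffer that outlives the allocator; names such as `Heap`
// and `demo` are illustrative only:
//
// #[repr(align(64))]
// struct Heap([u8; 4096]);
// static mut HEAP: Heap = Heap([0; 4096]);
//
// fn demo() {
//     // SAFETY: HEAP is aligned to BLOCK_SIZE, 4096 bytes long, and used by
//     // nothing but this allocator.
//     let mut alloc = unsafe {
//         let base = core::ptr::addr_of_mut!(HEAP) as *const u8;
//         FastAlloc::new(FastAllocParam::new(base, 4096))
//     };
//     let p = alloc.malloc(32);
//     assert!(!p.is_null());
//     alloc.free(p);
// }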