use crate::Value;
use crate::{GcConfig, GcError, GcPtr, GcStats, Result};
use std::collections::{HashMap, HashSet};
use std::sync::atomic::{AtomicUsize, Ordering};

/// Size classes used to bucket allocations into fixed-size memory blocks.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum SizeClass {
    Small,
    Medium,
    Large,
}

impl SizeClass {
    pub fn from_size(size: usize) -> Self {
        match size {
            0..=64 => SizeClass::Small,
            65..=512 => SizeClass::Medium,
            _ => SizeClass::Large,
        }
    }

    pub fn allocation_size(&self) -> usize {
        match self {
            SizeClass::Small => 64,
            SizeClass::Medium => 512,
            SizeClass::Large => 4096,
        }
    }
}

/// A single generation: bump-allocated memory blocks grouped by size class.
#[derive(Debug)]
pub struct Generation {
    /// Index of this generation (0 = youngest).
    pub number: usize,

    /// Memory blocks owned by this generation, keyed by size class.
    blocks: HashMap<SizeClass, Vec<MemoryBlock>>,

    /// Index of the block currently being bump-allocated from, per size class.
    allocation_cursors: HashMap<SizeClass, usize>,

    /// Total bytes handed out from this generation.
    allocated_bytes: AtomicUsize,

    /// Capacity budget for this generation in bytes.
    max_size: usize,

    /// Addresses of objects marked as survivors of a collection cycle.
    survivor_objects: Vec<usize>,

    /// Raw pointers to every object allocated since the last reset.
    allocated_ptrs: Vec<*const u8>,
}

impl Generation {
    fn new(number: usize, max_size: usize) -> Self {
        let mut blocks = HashMap::new();
        let mut allocation_cursors = HashMap::new();

        for &size_class in &[SizeClass::Small, SizeClass::Medium, SizeClass::Large] {
            blocks.insert(
                size_class,
                vec![MemoryBlock::new(size_class.allocation_size())],
            );
            allocation_cursors.insert(size_class, 0);
        }

        Self {
            number,
            blocks,
            allocation_cursors,
            allocated_bytes: AtomicUsize::new(0),
            max_size,
            survivor_objects: Vec::new(),
            allocated_ptrs: Vec::new(),
        }
    }

    /// Bump-allocate `size` bytes from this generation, growing a new block if needed.
    fn allocate(&mut self, size: usize) -> Result<*mut u8> {
        let size_class = SizeClass::from_size(size);
        let aligned_size = self.align_size(size);

        // Refuse the request if it would push the generation past its budget.
        if self.allocated_bytes.load(Ordering::Relaxed) + aligned_size > self.max_size {
            return Err(GcError::OutOfMemory(format!(
                "Generation {} out of memory",
                self.number
            )));
        }

        let blocks = self.blocks.get_mut(&size_class).unwrap();
        let cursor = self.allocation_cursors.get_mut(&size_class).unwrap();

        // Try the block the cursor currently points at.
        if let Some(block) = blocks.get_mut(*cursor) {
            if let Some(ptr) = block.allocate(aligned_size) {
                self.allocated_bytes
                    .fetch_add(aligned_size, Ordering::Relaxed);
                self.allocated_ptrs.push(ptr as *const u8);
                return Ok(ptr);
            }
        }

        // The current block is full: open a new one large enough for this request.
        let new_block = MemoryBlock::new(std::cmp::max(
            size_class.allocation_size(),
            aligned_size * 2,
        ));
        blocks.push(new_block);
        *cursor = blocks.len() - 1;

        // Retry from the freshly added block.
        let block = blocks.last_mut().unwrap();
        if let Some(ptr) = block.allocate(aligned_size) {
            self.allocated_bytes
                .fetch_add(aligned_size, Ordering::Relaxed);
            self.allocated_ptrs.push(ptr as *const u8);
            Ok(ptr)
        } else {
            Err(GcError::OutOfMemory(
                "Failed to allocate from new block".to_string(),
            ))
        }
    }

    /// Round `size` up to pointer alignment.
    fn align_size(&self, size: usize) -> usize {
        (size + std::mem::align_of::<*const u8>() - 1) & !(std::mem::align_of::<*const u8>() - 1)
    }

    pub fn allocated_bytes(&self) -> usize {
        self.allocated_bytes.load(Ordering::Relaxed)
    }

    pub fn is_full(&self) -> bool {
        self.allocated_bytes() >= self.max_size
    }

    /// Record that the object at `ptr` survived the current collection cycle.
    pub fn mark_survivor(&mut self, ptr: *const u8) {
        self.survivor_objects.push(ptr as usize);
    }

    /// Drain and return the addresses marked as survivors.
    pub fn take_survivors(&mut self) -> Vec<usize> {
        std::mem::take(&mut self.survivor_objects)
    }

    /// Drain and return every pointer allocated since the last reset.
    pub fn take_allocated_ptrs(&mut self) -> Vec<*const u8> {
        std::mem::take(&mut self.allocated_ptrs)
    }

    /// Rewind all blocks and clear per-cycle bookkeeping.
    pub fn reset(&mut self) {
        for blocks in self.blocks.values_mut() {
            for block in blocks {
                block.reset();
            }
        }
        for cursor in self.allocation_cursors.values_mut() {
            *cursor = 0;
        }
        self.allocated_bytes.store(0, Ordering::Relaxed);
        self.survivor_objects.clear();
        self.allocated_ptrs.clear();
    }
}

/// A fixed-capacity buffer that hands out memory by bumping an offset.
#[derive(Debug)]
struct MemoryBlock {
    memory: Vec<u8>,
    current_offset: usize,
    size: usize,
}

impl MemoryBlock {
    fn new(size: usize) -> Self {
        Self {
            memory: vec![0; size],
            current_offset: 0,
            size,
        }
    }

    fn allocate(&mut self, size: usize) -> Option<*mut u8> {
        if self.current_offset + size <= self.size {
            let ptr = unsafe { self.memory.as_mut_ptr().add(self.current_offset) };
            self.current_offset += size;
            Some(ptr)
        } else {
            None
        }
    }

    fn reset(&mut self) {
        self.current_offset = 0;
        self.memory.fill(0);
    }

    fn contains(&self, ptr: *const u8) -> bool {
        let start = self.memory.as_ptr() as usize;
        let end = start + self.size;
        let addr = ptr as usize;
        addr >= start && addr < end
    }
}

/// Generational allocator: new objects start in generation 0 and are
/// logically promoted to older generations as they survive collections.
pub struct GenerationalAllocator {
    /// All generations, ordered from youngest (index 0) to oldest.
    generations: Vec<Generation>,

    /// Current collector configuration.
    config: GcConfig,

    /// Number of allocations served by this allocator.
    total_allocations: AtomicUsize,

    /// How many collections each young object has survived so far.
    survival_counts: HashMap<*const u8, usize>,

    /// Pointers that have been logically promoted out of the young generation.
    promoted_ptrs: HashSet<*const u8>,
}

impl GenerationalAllocator {
    pub fn new(config: &GcConfig) -> Self {
        let mut generations = Vec::new();

        // Each successive generation gets twice the capacity of the previous one.
        let mut gen_size = config.young_generation_size;
        for i in 0..config.num_generations {
            generations.push(Generation::new(i, gen_size));
            gen_size *= 2;
        }

        Self {
            generations,
            config: config.clone(),
            total_allocations: AtomicUsize::new(0),
            survival_counts: HashMap::new(),
            promoted_ptrs: HashSet::new(),
        }
    }

    /// Allocate `value` in the young generation and return a GC-managed pointer to it.
    pub fn allocate(&mut self, value: Value, stats: &GcStats) -> Result<GcPtr<Value>> {
        let size = self.estimate_value_size(&value);

        // New objects always start in generation 0.
        let ptr = self.generations[0].allocate(size)?;

        // Safety: `ptr` was just reserved for `size` bytes by the young generation.
        unsafe {
            std::ptr::write(ptr as *mut Value, value);
        }

        self.total_allocations.fetch_add(1, Ordering::Relaxed);
        stats.record_allocation(size);

        Ok(unsafe { GcPtr::from_raw(ptr as *const Value) })
    }

    pub fn young_take_allocations(&mut self) -> Vec<*const u8> {
        self.generations[0].take_allocated_ptrs()
    }

    pub fn young_reset(&mut self) {
        self.generations[0].reset();
    }

    pub fn young_mark_survivor(&mut self, ptr: *const u8) {
        self.generations[0].mark_survivor(ptr);
    }

    pub fn young_allocations_count(&self) -> usize {
        self.generations[0].allocated_ptrs.len()
    }

    /// Record that `ptr` now logically belongs to an older generation.
    /// The object is not moved; only the promotion set is updated.
    #[allow(clippy::not_unsafe_ptr_arg_deref)]
    pub fn promote(&mut self, ptr: *const Value, _from_gen: usize) -> Result<GcPtr<Value>> {
        let raw = ptr as *const u8;
        self.promoted_ptrs.insert(raw);
        Ok(unsafe { GcPtr::from_raw(ptr) })
    }

    pub fn young_has_survivors(&self) -> bool {
        !self.generations[0].survivor_objects.is_empty()
    }

    pub fn find_generation(&self, ptr: *const u8) -> Option<usize> {
        for (i, gen) in self.generations.iter().enumerate() {
            for blocks in gen.blocks.values() {
                for block in blocks {
                    if block.contains(ptr) {
                        return Some(i);
                    }
                }
            }
        }
        None
    }

    pub fn young_generation_usage(&self) -> f64 {
        let gen = &self.generations[0];
        gen.allocated_bytes() as f64 / gen.max_size as f64
    }

    pub fn total_usage(&self) -> f64 {
        let total_allocated: usize = self.generations.iter().map(|g| g.allocated_bytes()).sum();
        let total_capacity: usize = self.generations.iter().map(|g| g.max_size).sum();
        total_allocated as f64 / total_capacity as f64
    }

    /// Apply a new configuration, resizing generation budgets in place.
    pub fn reconfigure(&mut self, config: &GcConfig) -> Result<()> {
        // The generation count is fixed for the lifetime of the allocator,
        // so reject the new configuration before adopting any of it.
        if config.num_generations != self.generations.len() {
            return Err(GcError::ConfigError(
                "Cannot change number of generations at runtime".to_string(),
            ));
        }

        self.config = config.clone();

        let mut gen_size = config.young_generation_size;
        for (i, gen) in self.generations.iter_mut().enumerate() {
            if gen.max_size != gen_size {
                log::info!(
                    "Resizing generation {} from {} to {} bytes",
                    i,
                    gen.max_size,
                    gen_size
                );
                gen.max_size = gen_size;
            }
            gen_size *= 2;
        }

        Ok(())
    }

    /// Record a survival for `ptr`; returns `true` once it has survived enough
    /// collections to be promoted out of the young generation.
    pub fn note_survivor_and_maybe_promote(&mut self, ptr: *const u8) -> bool {
        let count = self.survival_counts.entry(ptr).or_insert(0);
        *count += 1;
        if *count >= self.config.promotion_threshold {
            self.promoted_ptrs.insert(ptr);
            self.survival_counts.remove(&ptr);
            return true;
        }
        false
    }

    /// Generation a pointer logically belongs to: promoted pointers report the
    /// old generation (1) even though their memory still lives in a young block.
    pub fn logical_generation(&self, ptr: *const u8) -> Option<usize> {
        if self.promoted_ptrs.contains(&ptr) {
            return Some(1);
        }
        self.find_generation(ptr)
    }

    /// Forget per-cycle survival counts (promotions themselves are kept).
    pub fn clear_promotion_state(&mut self) {
        self.survival_counts.clear();
    }

    #[allow(clippy::only_used_in_recursion)]
    fn estimate_value_size(&self, _value: &Value) -> usize {
        // Conservative estimate: only the inline size of the enum is counted.
        std::mem::size_of::<Value>()
    }

    pub fn stats(&self) -> AllocatorStats {
        AllocatorStats {
            total_allocations: self.total_allocations.load(Ordering::Relaxed),
            generation_usage: self
                .generations
                .iter()
                .map(|g| g.allocated_bytes())
                .collect(),
            young_generation_usage: self.young_generation_usage(),
            total_usage: self.total_usage(),
        }
    }
}

#[derive(Debug, Clone)]
pub struct AllocatorStats {
    pub total_allocations: usize,
    pub generation_usage: Vec<usize>,
    pub young_generation_usage: f64,
    pub total_usage: f64,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::GcConfig;

    #[test]
    fn test_size_class_classification() {
        assert_eq!(SizeClass::from_size(32), SizeClass::Small);
        assert_eq!(SizeClass::from_size(100), SizeClass::Medium);
        assert_eq!(SizeClass::from_size(1000), SizeClass::Large);
    }
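
    // A minimal sketch test for the private MemoryBlock bump allocator, grounded
    // in the implementation above: allocations advance an offset, oversized
    // requests are refused, and reset() makes the full capacity available again.
    #[test]
    fn test_memory_block_bump_allocation() {
        let mut block = MemoryBlock::new(128);

        let ptr = block.allocate(64).expect("first allocation should fit");
        assert!(block.contains(ptr));

        // Only 64 bytes remain, so an oversized request is refused.
        assert!(block.allocate(128).is_none());

        block.reset();
        assert!(block.allocate(128).is_some());
    }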

    #[test]
    fn test_generation_allocation() {
        let mut gen = Generation::new(0, 1024);

        let ptr = gen.allocate(64).expect("allocation should succeed");
        assert!(!ptr.is_null());
        assert!(gen.allocated_bytes() >= 64);
    }
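
    // A sketch test for Generation bookkeeping, based on the methods above:
    // reset() should rewind the bump cursors and drop both the survivor list
    // and the allocated-pointer log.
    #[test]
    fn test_generation_reset_clears_bookkeeping() {
        let mut gen = Generation::new(0, 1024);

        let ptr = gen.allocate(64).expect("allocation should succeed");
        gen.mark_survivor(ptr);
        assert!(gen.allocated_bytes() >= 64);
        assert!(!gen.is_full());

        gen.reset();
        assert_eq!(gen.allocated_bytes(), 0);
        assert!(gen.take_survivors().is_empty());
        assert!(gen.take_allocated_ptrs().is_empty());
    }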

    #[test]
    fn test_allocator_basic() {
        let config = GcConfig::default();
        let mut allocator = GenerationalAllocator::new(&config);
        let stats = GcStats::new();

        let value = Value::Num(42.0);
        let ptr = allocator
            .allocate(value, &stats)
            .expect("allocation should succeed");

        assert_eq!(*ptr, Value::Num(42.0));
    }
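
    // A sketch test for young-generation bookkeeping and logical promotion. It
    // assumes GcConfig::default() is usable here and that its promotion_threshold
    // field is readable from this module, as the allocator code above already relies on.
    #[test]
    fn test_young_bookkeeping_and_promotion() {
        let config = GcConfig::default();
        let mut allocator = GenerationalAllocator::new(&config);
        let stats = GcStats::new();

        allocator
            .allocate(Value::Num(1.0), &stats)
            .expect("allocation should succeed");

        // The fresh allocation lands in generation 0 and shows up in its log.
        let ptrs = allocator.young_take_allocations();
        assert_eq!(ptrs.len(), 1);
        assert_eq!(allocator.find_generation(ptrs[0]), Some(0));
        assert!(allocator.young_generation_usage() > 0.0);

        // Repeated survivals promote the pointer once the threshold is reached.
        let ptr = ptrs[0];
        for _ in 1..config.promotion_threshold {
            assert!(!allocator.note_survivor_and_maybe_promote(ptr));
        }
        assert!(allocator.note_survivor_and_maybe_promote(ptr));
        assert_eq!(allocator.logical_generation(ptr), Some(1));
    }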
}