use std::cmp::Reverse;
use std::collections::BinaryHeap;

use crate::phoenix_error;
use crate::value::{HeapObj, HeapObjVal, Value};
use crate::vm::Global;
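// When enabled, log every GC event; DEBUG_STRESS_GC additionally forces a
// collection on every allocation.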
const DEBUG_GC: bool = false;
const DEBUG_STRESS_GC: bool = false;
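// Number of live allocations that triggers the first collection.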
const INIT_GC_THRESHOLD: usize = 200;
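// Bounds on how far `rescale_threshold` may shrink or grow the threshold
// after a collection.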
const MIN_SCALING_FACTOR: f32 = 0.5;
const MAX_SCALING_FACTOR: f32 = 2.0;
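// Only shrink the instance vec when the truncated size would fall below
// this fraction of its current length.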
const SHRINK_THRESHOLD: f32 = 0.75;
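/// A mark-and-sweep garbage collector for the Phoenix VM. Heap objects live
/// in `instances`; slots freed by `sweep` are remembered in `free_slots`
/// (a min-heap via `Reverse`, so the lowest slot is reused first).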
#[derive(Debug, Clone)]
pub struct GC {
pub instances: Vec<HeapObj>,
allocations: usize,
next_gc_threshold: usize,
grey_worklist: Vec<usize>,
free_slots: BinaryHeap<Reverse<usize>>,
}
impl Default for GC {
fn default() -> Self {
Self {
instances: Vec::new(),
allocations: 0,
next_gc_threshold: INIT_GC_THRESHOLD,
grey_worklist: Vec::new(),
free_slots: BinaryHeap::new(),
}
}
}
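// `free_slots` is omitted from the comparison, most likely because
// `BinaryHeap` does not implement `PartialEq`.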
impl PartialEq for GC {
fn eq(&self, other: &Self) -> bool {
self.instances == other.instances
&& self.allocations == other.allocations
&& self.next_gc_threshold == other.next_gc_threshold
&& self.grey_worklist == other.grey_worklist
}
}
impl GC {
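/// Move `val` onto the heap and return a `Value::PhoenixPointer` to its
/// slot, reusing the lowest free slot if one exists. May run a collection
/// first, using `stack` and `globals` as the root set.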
pub fn alloc(&mut self, val: HeapObj, stack: &[Value], globals: &[Global]) -> Value {
if DEBUG_STRESS_GC || self.allocations >= self.next_gc_threshold {
self.collect_garbage(stack, globals);
}
self.instances.push(val);
let index = if self.free_slots.is_empty() {
self.instances.len() - 1
} else {
match self.free_slots.pop() {
Some(Reverse(index)) => {
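// `val` was just pushed onto the back of `instances`, so `swap_remove`
// extracts the placeholder at `index` and moves `val` into its slot in
// a single step.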
let placeholder = self.instances.swap_remove(index);
if placeholder.obj != HeapObjVal::HeapPlaceholder {
phoenix_error!(
"VM error! GC attempted to use an allocated value as a free slot"
)
}
index
}
None => phoenix_error!("VM panic! free_slots.pop() returned None after a non-empty check"),
}
};
self.allocations += 1;
if DEBUG_GC {
eprintln!(
"allocated slot {} | # of allocations = {}",
index, self.allocations
)
}
Value::PhoenixPointer(index)
}
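/// Mark the object at `index` and queue it on the grey worklist so that
/// `mark_grey` will trace its children.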
fn mark_heap_obj(&mut self, index: usize) {
let obj_opt = self.instances.get_mut(index);
match obj_opt {
Some(obj) => {
if !obj.is_marked {
obj.is_marked = true;
if DEBUG_GC {
eprintln!("marked {:?} at {}", obj.obj_type, index)
}
self.grey_worklist.push(index);
}
}
None => panic!("VM panic! Why is there an unallocated pointer?"),
}
}
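/// Mark the heap object a pointer refers to; all other values live
/// directly on the stack and need no tracing.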
fn mark_value(&mut self, val: &Value) {
if let Value::PhoenixPointer(ptr) = val {
self.mark_heap_obj(*ptr);
}
}
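/// Mark everything directly reachable from the VM roots: the value stack
/// and the initialized globals.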
fn mark_roots(&mut self, stack: &[Value], globals: &[Global]) {
for val in stack.iter() {
self.mark_value(val);
}
for val in globals.iter() {
if let Global::Init(v) = val {
self.mark_value(v);
}
}
}
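/// Trace the grey worklist to a fixpoint, marking every object reachable
/// from an already-marked one (the "blacken" phase of mark-and-sweep).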
fn mark_grey(&mut self) {
while let Some(index) = self.grey_worklist.pop() {
let obj_opt = self.instances.get(index);
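// Children are buffered in `to_mark` because marking them needs
// `&mut self` while `obj` still borrows `self.instances`.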
let mut to_mark = Vec::new();
match obj_opt {
Some(obj) => {
if DEBUG_GC {
eprintln!("blackening {:?} at {}", obj.obj_type, index)
}
match &obj.obj {
HeapObjVal::PhoenixClosure(closure) => {
for val in &closure.values {
if let Value::PhoenixPointer(ptr) = val {
to_mark.push(*ptr);
}
}
}
HeapObjVal::PhoenixInstance(instance) => {
for val in instance.fields.values() {
if let Value::PhoenixPointer(ptr) = val {
to_mark.push(*ptr);
}
}
}
HeapObjVal::PhoenixList(list) => {
for val in list.values.iter() {
if let Value::PhoenixPointer(ptr) = val {
to_mark.push(*ptr);
}
}
}
HeapObjVal::HeapPlaceholder => {
panic!("VM panic! Why do we have a valid reference to a heap placeholder value?")
}
HeapObjVal::PhoenixString(_string) => {
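// Strings contain no heap pointers, so there is nothing to trace.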
}
HeapObjVal::PhoenixHashMap(map) => {
for (key, value) in &map.map {
if let Value::PhoenixPointer(ptr) = key {
to_mark.push(*ptr);
}
if let Value::PhoenixPointer(ptr) = value {
to_mark.push(*ptr);
}
}
}
}
}
None => panic!("VM panic! Why is there an unallocated pointer?"),
}
for ptr in to_mark.iter() {
self.mark_heap_obj(*ptr);
}
}
}
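/// Free every unmarked object, clear the marks on survivors, and return
/// `Some(index)` if `instances` ends in a run of placeholders beginning at
/// `index`, meaning the vec can be truncated to that length.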
fn sweep(&mut self) -> Option<usize> {
let mut shrinkable_to: Option<usize> = None;
for (index, obj) in self.instances.iter_mut().enumerate() {
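// Track where the trailing run of placeholders begins; any live object
// resets it.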
if obj.obj == HeapObjVal::HeapPlaceholder {
if shrinkable_to.is_none() {
shrinkable_to = Some(index);
}
} else {
shrinkable_to = None;
if !obj.is_marked {
if DEBUG_GC {
eprintln!(
"freed slot {} | # of allocations = {} | value to free = {:?}",
index, self.allocations, obj,
)
}
*obj = HeapObj::new_placeholder();
self.free_slots.push(Reverse(index));
self.allocations -= 1;
} else {
obj.is_marked = false;
}
}
}
shrinkable_to
}
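/// Truncate the instance vec to `new_size` if that removes more than
/// `1.0 - SHRINK_THRESHOLD` of its length, discarding free slots that no
/// longer exist.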
fn shrink(&mut self, new_size: usize) {
if (new_size as f32) < SHRINK_THRESHOLD * (self.instances.len() as f32) {
if DEBUG_GC {
eprintln!("shrinking from {} to {}", self.instances.len(), new_size)
}
self.instances.truncate(new_size);
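// Rebuild the free list, keeping only slots that survive the truncation.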
let new_slots: BinaryHeap<Reverse<usize>> = self
.free_slots
.drain()
.filter(|&Reverse(i)| i < new_size)
.collect();
self.free_slots = new_slots;
} else if DEBUG_GC {
eprintln!(
"not shrinking from {} to {}",
self.instances.len(),
new_size
)
}
}
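/// Linearly rescale the collection threshold based on how effective the
/// last collection was: if nothing was freed (`diff == 0`) the threshold
/// doubles (MAX_SCALING_FACTOR); if everything was freed it halves
/// (MIN_SCALING_FACTOR).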
fn rescale_threshold(&mut self) {
let diff = self.next_gc_threshold - self.allocations;
let old_threshold = self.next_gc_threshold as f32;
let slope: f32 = (MIN_SCALING_FACTOR - MAX_SCALING_FACTOR) / old_threshold;
let scaling_factor = slope * (diff as f32) + MAX_SCALING_FACTOR;
let new_threshold = old_threshold * scaling_factor;
self.next_gc_threshold = 1 + new_threshold as usize;
if DEBUG_GC {
eprintln!(
"Scaled GC threshold from {} to {}",
old_threshold, self.next_gc_threshold
);
}
}
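/// Run a full collection: mark from the roots, trace the grey worklist,
/// sweep the dead objects, shrink the heap if possible, and rescale the
/// collection threshold.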
fn collect_garbage(&mut self, stack: &[Value], globals: &[Global]) {
if DEBUG_GC {
eprintln!("--- gc begin")
}
self.mark_roots(stack, globals);
self.mark_grey();
let shrinkable_to = self.sweep();
if let Some(new_size) = shrinkable_to {
self.shrink(new_size);
}
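// Note: `collected` is exact only when this collection fired at the
// threshold; under DEBUG_STRESS_GC it overstates the count.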
if DEBUG_GC {
eprintln!(
"After collection | vec_size = {} | allocations = {} | collected = {}",
self.instances.len(),
self.allocations,
self.next_gc_threshold - self.allocations
);
}
self.rescale_threshold();
if DEBUG_GC {
dbg!(&self.instances);
eprintln!("--- gc end")
}
}
pub fn new() -> GC {
GC::default()
}
}