use std::{
collections::{HashSet, VecDeque},
ptr::NonNull,
sync::{Arc, Mutex},
thread,
time::{Duration, Instant},
};
use typescript_types::TsValue;
use crate::memory::{AllocationStrategy, Allocator, AllocatorFactory};
use super::{
event::GCEventHandler,
header::ObjectHeader,
object::GcObject,
phase::GCPhase,
stats::GCStats,
work_stealing::{RememberedSetEntry, WorkStealingQueue},
};
/// Adaptive time budget for incremental GC steps.
///
/// Tracks how long each collection phase (mark / sweep / compact) may run in a
/// single incremental step, and adapts both the per-phase budgets and the
/// recommended step size from the observed ratio of actual to budgeted time.
#[derive(Debug, Clone)]
pub struct TimeBudget {
    /// Hard cap for a single incremental step, in microseconds.
    max_step_time_us: u64,
    /// Base mark-phase budget in microseconds (scaled by `adaptive_factor`).
    mark_budget_us: u64,
    /// Base sweep-phase budget in microseconds (scaled by `adaptive_factor`).
    sweep_budget_us: u64,
    /// Base compact-phase budget in microseconds (scaled by `adaptive_factor`).
    compact_budget_us: u64,
    /// Target upper bound for a full GC pause, in microseconds.
    target_max_pause_us: u64,
    /// Multiplier applied to the per-phase budgets; shrinks when steps overrun
    /// their budget and grows when they finish well under it. Kept in [0.5, 2.0].
    adaptive_factor: f64,
    /// Recent `(budget_us, actual_us)` samples, oldest first.
    history: Vec<(u64, u64)>,
    /// Maximum number of samples retained in `history`.
    max_history_size: usize,
    /// Multiplier used by `recommended_step_size`; kept in [0.5, 2.0].
    step_adjustment_factor: f64,
    /// Lower bound on the recommended incremental step size (object count).
    min_step_size: usize,
    /// Upper bound on the recommended incremental step size (object count).
    max_step_size: usize,
}

impl Default for TimeBudget {
    fn default() -> Self {
        Self {
            max_step_time_us: 1000,
            mark_budget_us: 500,
            sweep_budget_us: 300,
            compact_budget_us: 200,
            target_max_pause_us: 5000,
            adaptive_factor: 1.0,
            history: Vec::with_capacity(10),
            max_history_size: 10,
            step_adjustment_factor: 1.0,
            min_step_size: 50,
            max_step_size: 500,
        }
    }
}

impl TimeBudget {
    /// Creates a budget with the default configuration.
    pub fn new() -> Self {
        Self::default()
    }

    /// Creates a budget with an explicit step cap and pause target; per-phase
    /// budgets are derived as fixed fractions (1/2, 1/3, 1/5) of the step cap.
    pub fn with_config(max_step_time_us: u64, target_max_pause_us: u64) -> Self {
        Self {
            max_step_time_us,
            mark_budget_us: max_step_time_us / 2,
            sweep_budget_us: max_step_time_us / 3,
            compact_budget_us: max_step_time_us / 5,
            target_max_pause_us,
            ..Self::default()
        }
    }

    /// Updates the step cap and re-derives the per-phase budgets from it.
    pub fn set_max_step_time(&mut self, time_us: u64) {
        self.max_step_time_us = time_us;
        self.mark_budget_us = time_us / 2;
        self.sweep_budget_us = time_us / 3;
        self.compact_budget_us = time_us / 5;
    }

    /// Sets the target upper bound for a full GC pause.
    pub fn set_target_max_pause(&mut self, time_us: u64) {
        self.target_max_pause_us = time_us;
    }

    /// Returns the hard cap for a single incremental step.
    pub fn max_step_time(&self) -> u64 {
        self.max_step_time_us
    }

    /// Mark-phase budget after adaptive scaling.
    pub fn mark_budget(&self) -> u64 {
        (self.mark_budget_us as f64 * self.adaptive_factor) as u64
    }

    /// Sweep-phase budget after adaptive scaling.
    pub fn sweep_budget(&self) -> u64 {
        (self.sweep_budget_us as f64 * self.adaptive_factor) as u64
    }

    /// Compact-phase budget after adaptive scaling.
    pub fn compact_budget(&self) -> u64 {
        (self.compact_budget_us as f64 * self.adaptive_factor) as u64
    }

    /// Records one step's `(budget, actual)` timing sample and adapts
    /// `adaptive_factor` / `step_adjustment_factor` from the recent average
    /// actual-to-budget ratio.
    pub fn adapt(&mut self, actual_time_us: u64, budget_time_us: u64) {
        self.history.push((budget_time_us, actual_time_us));
        if self.history.len() > self.max_history_size {
            // Drop the oldest sample; the history is tiny, so O(n) is fine.
            self.history.remove(0);
        }
        // Single pass over the samples with a non-zero budget, accumulating
        // both the ratio sum and the sample count (previously two scans).
        let (total_ratio, count) = self
            .history
            .iter()
            .filter(|(budget, _)| *budget > 0)
            .fold((0.0f64, 0usize), |(sum, n), (budget, actual)| {
                (sum + *actual as f64 / *budget as f64, n + 1)
            });
        if count > 0 {
            let avg_ratio = total_ratio / count as f64;
            if avg_ratio > 1.0 {
                // Steps are overrunning their budgets: tighten them.
                self.adaptive_factor *= 0.9;
            } else if avg_ratio < 0.5 {
                // Steps finish well under budget: allow longer steps.
                self.adaptive_factor *= 1.1;
            }
            self.adaptive_factor = self.adaptive_factor.clamp(0.5, 2.0);
            self.step_adjustment_factor = (1.0 / avg_ratio).clamp(0.5, 2.0);
        }
    }

    /// Suggests a new incremental step size scaled by the adjustment factor
    /// and clamped to the configured range.
    pub fn recommended_step_size(&self, current_step_size: usize) -> usize {
        let new_step = (current_step_size as f64 * self.step_adjustment_factor) as usize;
        new_step.clamp(self.min_step_size, self.max_step_size)
    }

    /// Sets the bounds used by `recommended_step_size`.
    pub fn set_step_range(&mut self, min_step: usize, max_step: usize) {
        self.min_step_size = min_step;
        self.max_step_size = max_step;
    }

    /// Current step-size adjustment multiplier.
    pub fn step_adjustment_factor(&self) -> f64 {
        self.step_adjustment_factor
    }

    /// Forgets all timing samples.
    pub fn clear_history(&mut self) {
        self.history.clear();
    }
}
/// Policy deciding when a young-generation object is promoted to the old
/// generation, with optional dynamic tuning of the age threshold based on
/// observed survival rates.
#[derive(Debug, Clone)]
pub struct PromotionPolicy {
    /// Objects at or above this age (survived collections) are promoted.
    age_threshold: u8,
    /// Objects at or above this size are promoted immediately (size-based).
    large_object_threshold: usize,
    /// Objects at or below this size are held back while still young.
    small_object_threshold: usize,
    /// Whether survival-rate feedback may adjust `age_threshold`.
    dynamic_age_enabled: bool,
    /// Desired young-generation survival rate the tuner steers toward.
    target_survival_rate: f64,
    /// Upper bound on the promotion rate (currently informational).
    max_promotion_rate: f64,
    /// Recent survival-rate samples, oldest first.
    survival_rate_history: Vec<f64>,
    /// Maximum number of samples retained in `survival_rate_history`.
    max_history_size: usize,
    /// Enables the size-based promotion rules.
    size_based_promotion: bool,
    /// Enables the access-count-based promotion rule.
    access_based_promotion: bool,
    /// Lower clamp for `age_threshold`.
    min_age_threshold: u8,
    /// Upper clamp for `age_threshold`.
    max_age_threshold: u8,
}

impl Default for PromotionPolicy {
    fn default() -> Self {
        Self {
            age_threshold: 3,
            large_object_threshold: 1024,
            small_object_threshold: 64,
            dynamic_age_enabled: true,
            target_survival_rate: 0.5,
            max_promotion_rate: 0.3,
            survival_rate_history: Vec::with_capacity(10),
            max_history_size: 10,
            size_based_promotion: true,
            access_based_promotion: true,
            min_age_threshold: 1,
            max_age_threshold: 15,
        }
    }
}

impl PromotionPolicy {
    /// Creates a policy with the default configuration.
    pub fn new() -> Self {
        Self::default()
    }

    /// Creates a policy with explicit age and large-object thresholds.
    /// The age threshold is clamped into the valid range (the previous
    /// implementation bypassed the clamp enforced by `set_age_threshold`).
    pub fn with_config(age_threshold: u8, large_object_threshold: usize) -> Self {
        let mut policy = Self { large_object_threshold, ..Self::default() };
        policy.set_age_threshold(age_threshold);
        policy
    }

    /// Sets the age threshold, clamped into `[min_age_threshold, max_age_threshold]`.
    pub fn set_age_threshold(&mut self, threshold: u8) {
        self.age_threshold = threshold.clamp(self.min_age_threshold, self.max_age_threshold);
    }

    /// Sets the size at or above which objects are promoted immediately.
    pub fn set_large_object_threshold(&mut self, threshold: usize) {
        self.large_object_threshold = threshold;
    }

    /// Sets the size at or below which young objects are held back.
    pub fn set_small_object_threshold(&mut self, threshold: usize) {
        self.small_object_threshold = threshold;
    }

    /// Enables or disables survival-rate-driven tuning of the age threshold.
    pub fn set_dynamic_age_enabled(&mut self, enabled: bool) {
        self.dynamic_age_enabled = enabled;
    }

    /// Enables or disables the size-based promotion rules.
    pub fn set_size_based_promotion(&mut self, enabled: bool) {
        self.size_based_promotion = enabled;
    }

    /// Enables or disables the access-count-based promotion rule.
    pub fn set_access_based_promotion(&mut self, enabled: bool) {
        self.access_based_promotion = enabled;
    }

    /// Records a young-generation survival rate and, when dynamic tuning is
    /// enabled and at least three samples exist, nudges the age and size
    /// thresholds toward the target survival rate.
    pub fn record_survival_rate(&mut self, survival_rate: f64) {
        self.survival_rate_history.push(survival_rate);
        if self.survival_rate_history.len() > self.max_history_size {
            self.survival_rate_history.remove(0);
        }
        if self.dynamic_age_enabled && self.survival_rate_history.len() >= 3 {
            let avg_rate: f64 = self.survival_rate_history.iter().sum::<f64>()
                / self.survival_rate_history.len() as f64;
            // Too many survivors: make promotion harder; too few: easier.
            if avg_rate > self.target_survival_rate + 0.1 && self.age_threshold < self.max_age_threshold {
                self.age_threshold += 1;
            } else if avg_rate < self.target_survival_rate - 0.1 && self.age_threshold > self.min_age_threshold {
                self.age_threshold -= 1;
            }
            // Strong deviations also move the large-object threshold by 10%.
            if avg_rate > self.target_survival_rate + 0.2 {
                self.large_object_threshold = (self.large_object_threshold as f64 * 1.1) as usize;
            } else if avg_rate < self.target_survival_rate - 0.2 {
                self.large_object_threshold = (self.large_object_threshold as f64 * 0.9) as usize;
            }
        }
    }

    /// Decides whether the object described by `header` should move to the
    /// old generation.
    ///
    /// Rule order matters: old-enough objects always promote, large objects
    /// promote early, small still-young objects are explicitly held back
    /// (even from access-based promotion), and frequently accessed objects
    /// promote ahead of their age.
    pub fn should_promote(&self, header: &ObjectHeader) -> bool {
        if header.age >= self.age_threshold {
            return true;
        }
        if self.size_based_promotion && header.size >= self.large_object_threshold {
            return true;
        }
        // `saturating_add` avoids a u8 overflow panic (debug builds) when the
        // threshold has been configured near u8::MAX via `set_age_range`.
        if self.size_based_promotion
            && header.size <= self.small_object_threshold
            && header.age < self.age_threshold.saturating_add(2)
        {
            return false;
        }
        if self.access_based_promotion && header.access_count > 5 {
            return true;
        }
        false
    }

    /// Current age threshold.
    pub fn age_threshold(&self) -> u8 {
        self.age_threshold
    }

    /// Current large-object threshold.
    pub fn large_object_threshold(&self) -> usize {
        self.large_object_threshold
    }

    /// Current small-object threshold.
    pub fn small_object_threshold(&self) -> usize {
        self.small_object_threshold
    }

    /// Sets the clamp range for the age threshold and re-clamps the current value.
    pub fn set_age_range(&mut self, min_age: u8, max_age: u8) {
        self.min_age_threshold = min_age;
        self.max_age_threshold = max_age;
        self.age_threshold = self.age_threshold.clamp(self.min_age_threshold, self.max_age_threshold);
    }

    /// Whether size-based promotion rules are active.
    pub fn size_based_promotion(&self) -> bool {
        self.size_based_promotion
    }

    /// Whether access-count-based promotion is active.
    pub fn access_based_promotion(&self) -> bool {
        self.access_based_promotion
    }
}
/// Strategy used by the write barrier to track inter-generational stores.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum WriteBarrierType {
/// No barrier: stores are not tracked.
None,
/// Card-table barrier: dirty a fixed-size card covering the written address.
CardTable,
/// Snapshot-at-the-beginning style barrier.
SnapshotAtTheBeginning,
}
/// Generational write barrier with an optional card table and batched,
/// de-duplicated remembered-set updates.
#[derive(Debug, Clone)]
pub struct WriteBarrier {
    /// Which barrier mechanism is active.
    barrier_type: WriteBarrierType,
    /// One byte per card; non-zero means the card is dirty.
    card_table: Vec<u8>,
    /// Bytes of heap covered by a single card.
    card_size: usize,
    /// Number of cards currently marked dirty.
    dirty_card_count: usize,
    /// When enabled, remembered-set entries are buffered and flushed in batches.
    batch_update_enabled: bool,
    /// Buffered remembered-set entries awaiting a flush.
    batch_buffer: Vec<RememberedSetEntry>,
    /// Flush once the buffer reaches this many entries.
    batch_threshold: usize,
    /// Time of the last flush, used for the time-based flush trigger.
    last_batch_update: std::time::Instant,
    /// Flush when this many milliseconds elapsed since the last flush.
    batch_time_threshold: u64,
    /// `(old, young)` address pairs already buffered, for de-duplication.
    seen_entries: std::collections::HashSet<(usize, usize)>,
    /// Whether duplicate entries are dropped before buffering.
    deduplication_enabled: bool,
}

impl Default for WriteBarrier {
    fn default() -> Self {
        Self {
            barrier_type: WriteBarrierType::CardTable,
            card_table: Vec::new(),
            card_size: 256,
            dirty_card_count: 0,
            batch_update_enabled: true,
            batch_buffer: Vec::with_capacity(200),
            batch_threshold: 100,
            last_batch_update: std::time::Instant::now(),
            batch_time_threshold: 1,
            seen_entries: std::collections::HashSet::with_capacity(100),
            deduplication_enabled: true,
        }
    }
}

impl WriteBarrier {
    /// Creates a barrier with the default (card-table) configuration.
    pub fn new() -> Self {
        Self::default()
    }

    /// Creates a barrier of the given type with otherwise default settings.
    pub fn with_type(barrier_type: WriteBarrierType) -> Self {
        Self { barrier_type, ..Self::default() }
    }

    /// Sizes the card table to cover `heap_size` bytes (rounded up to whole
    /// cards) and clears all dirty state.
    pub fn init_card_table(&mut self, heap_size: usize) {
        let card_count = (heap_size + self.card_size - 1) / self.card_size;
        self.card_table = vec![0; card_count];
        self.dirty_card_count = 0;
    }

    /// Marks the card containing `address` dirty (card-table mode only).
    pub fn mark_card_dirty(&mut self, address: usize) {
        if self.barrier_type != WriteBarrierType::CardTable {
            return;
        }
        let card_index = address / self.card_size;
        // Only count the clean -> dirty transition.
        if card_index < self.card_table.len() && self.card_table[card_index] == 0 {
            self.card_table[card_index] = 1;
            self.dirty_card_count += 1;
        }
    }

    /// Returns whether the card containing `address` is dirty.
    pub fn is_card_dirty(&self, address: usize) -> bool {
        let card_index = address / self.card_size;
        card_index < self.card_table.len() && self.card_table[card_index] != 0
    }

    /// Clears every card. `slice::fill` replaces the previous index loop.
    pub fn clear_dirty_cards(&mut self) {
        self.card_table.fill(0);
        self.dirty_card_count = 0;
    }

    /// Number of cards currently dirty.
    pub fn dirty_card_count(&self) -> usize {
        self.dirty_card_count
    }

    /// Records an old-to-young reference.
    ///
    /// Returns `Some(entries)` when the caller should apply a batch now:
    /// either batching is disabled (a single-entry batch is returned) or the
    /// buffer hit its size/time threshold. Returns `None` when the entry was
    /// buffered or dropped as a duplicate.
    pub fn add_remembered_entry(&mut self, entry: RememberedSetEntry) -> Option<Vec<RememberedSetEntry>> {
        if !self.batch_update_enabled {
            return Some(vec![entry]);
        }
        if self.deduplication_enabled {
            let key = (entry.old_obj.as_ptr() as usize, entry.young_obj.as_ptr() as usize);
            // `insert` returns false when the key was already present, which
            // replaces the previous contains-then-insert double lookup.
            if !self.seen_entries.insert(key) {
                return None;
            }
        }
        self.batch_buffer.push(entry);
        let elapsed = self.last_batch_update.elapsed().as_millis() as u64;
        if self.batch_buffer.len() >= self.batch_threshold || elapsed >= self.batch_time_threshold {
            let result = Some(self.flush_batch_buffer());
            self.last_batch_update = std::time::Instant::now();
            result
        } else {
            None
        }
    }

    /// Empties the batch buffer (and the de-duplication set) and returns the
    /// buffered entries. `mem::take` moves the buffer out instead of cloning
    /// it as the previous implementation did.
    pub fn flush_batch_buffer(&mut self) -> Vec<RememberedSetEntry> {
        self.seen_entries.clear();
        std::mem::take(&mut self.batch_buffer)
    }

    /// Switches the barrier mechanism.
    pub fn set_barrier_type(&mut self, barrier_type: WriteBarrierType) {
        self.barrier_type = barrier_type;
    }

    /// Currently active barrier mechanism.
    pub fn barrier_type(&self) -> WriteBarrierType {
        self.barrier_type
    }

    /// Enables or disables batching of remembered-set entries.
    pub fn set_batch_update_enabled(&mut self, enabled: bool) {
        self.batch_update_enabled = enabled;
    }

    /// Sets the buffered-entry count that triggers a flush.
    pub fn set_batch_threshold(&mut self, threshold: usize) {
        self.batch_threshold = threshold;
    }

    /// Sets the elapsed time (ms) that triggers a flush.
    pub fn set_batch_time_threshold(&mut self, threshold_ms: u64) {
        self.batch_time_threshold = threshold_ms;
    }

    /// Enables or disables de-duplication of buffered entries.
    pub fn set_deduplication_enabled(&mut self, enabled: bool) {
        self.deduplication_enabled = enabled;
    }

    /// Whether de-duplication of buffered entries is active.
    pub fn deduplication_enabled(&self) -> bool {
        self.deduplication_enabled
    }
}
#[derive(Debug, Clone)]
pub struct PauseTimeRecord {
pub start_time: Instant,
pub end_time: Instant,
pub phase: GCPhase,
pub duration_us: u64,
pub objects_processed: usize,
pub bytes_collected: usize,
}
impl PauseTimeRecord {
pub fn new(phase: GCPhase, duration_us: u64) -> Self {
Self {
start_time: Instant::now(),
end_time: Instant::now(),
phase,
duration_us,
objects_processed: 0,
bytes_collected: 0,
}
}
pub fn with_details(
start_time: Instant,
end_time: Instant,
phase: GCPhase,
objects_processed: usize,
bytes_collected: usize,
) -> Self {
let duration_us = end_time.duration_since(start_time).as_micros() as u64;
Self { start_time, end_time, phase, duration_us, objects_processed, bytes_collected }
}
}
/// Aggregated GC pause-time statistics plus the individual records.
#[derive(Debug, Clone, Default)]
pub struct PauseTimeReport {
    /// Every pause recorded so far, in arrival order.
    pub pause_records: Vec<PauseTimeRecord>,
    /// Longest pause seen, in microseconds.
    pub max_pause_us: u64,
    /// Shortest pause seen (0 until the first record), in microseconds.
    pub min_pause_us: u64,
    /// Mean pause length, in microseconds (integer division).
    pub avg_pause_us: u64,
    /// Sum of all pause lengths, in microseconds.
    pub total_pause_us: u64,
    /// Number of recorded pauses.
    pub pause_count: usize,
    /// Pauses that exceeded `target_pause_us`.
    pub over_target_count: usize,
    /// Pause-time goal used for compliance accounting.
    pub target_pause_us: u64,
}

impl PauseTimeReport {
    /// Creates an empty report.
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the pause-time goal used for compliance accounting.
    pub fn set_target_pause(&mut self, target_us: u64) {
        self.target_pause_us = target_us;
    }

    /// Folds one pause into the aggregate statistics and stores the record.
    pub fn add_record(&mut self, record: PauseTimeRecord) {
        self.max_pause_us = self.max_pause_us.max(record.duration_us);
        // min is 0 until the first record, so treat 0 as "unset".
        if self.min_pause_us == 0 || record.duration_us < self.min_pause_us {
            self.min_pause_us = record.duration_us;
        }
        self.total_pause_us += record.duration_us;
        self.pause_count += 1;
        self.avg_pause_us = self.total_pause_us / self.pause_count as u64;
        if record.duration_us > self.target_pause_us {
            self.over_target_count += 1;
        }
        self.pause_records.push(record);
    }

    /// Fraction of pauses at or under the target (1.0 when nothing recorded).
    pub fn compliance_rate(&self) -> f64 {
        if self.pause_count == 0 {
            return 1.0;
        }
        (self.pause_count - self.over_target_count) as f64 / self.pause_count as f64
    }

    /// Returns the pause duration at the given percentile (0 when empty).
    pub fn percentile(&self, percentile: u8) -> u64 {
        if self.pause_records.is_empty() {
            return 0;
        }
        let mut durations: Vec<u64> = self.pause_records.iter().map(|r| r.duration_us).collect();
        // `sort_unstable` is faster and allocation-free; stability is
        // irrelevant when sorting plain integers.
        durations.sort_unstable();
        let index = ((percentile as usize) * durations.len() / 100).min(durations.len() - 1);
        durations[index]
    }

    /// Resets all statistics; the target pause is intentionally kept.
    pub fn clear(&mut self) {
        self.pause_records.clear();
        self.max_pause_us = 0;
        self.min_pause_us = 0;
        self.avg_pause_us = 0;
        self.total_pause_us = 0;
        self.pause_count = 0;
        self.over_target_count = 0;
    }

    /// Human-readable multi-line summary of the report.
    pub fn summary(&self) -> String {
        format!(
            "GC Pause Time Report:\n\
            - Total pauses: {}\n\
            - Total pause time: {} us\n\
            - Average pause time: {} us\n\
            - Max pause time: {} us\n\
            - Min pause time: {} us\n\
            - P99 pause time: {} us\n\
            - P95 pause time: {} us\n\
            - Compliance rate: {:.2}%\n\
            - Over target count: {}",
            self.pause_count,
            self.total_pause_us,
            self.avg_pause_us,
            self.max_pause_us,
            self.min_pause_us,
            self.percentile(99),
            self.percentile(95),
            self.compliance_rate() * 100.0,
            self.over_target_count
        )
    }
}
/// Incremental, generational mark-sweep-compact garbage collector.
pub struct GC {
// Root objects; each holds a ref-count bump while registered (see `add_root`).
roots: Vec<NonNull<GcObject>>,
// Every object the GC knows about, across both generations.
heap_objects: Vec<NonNull<GcObject>>,
// Recently allocated objects awaiting a minor collection.
young_generation: Vec<NonNull<GcObject>>,
// Objects promoted out of the young generation.
old_generation: Vec<NonNull<GcObject>>,
// Addresses marked live during the current collection cycle.
marked_objects: HashSet<usize>,
// Objects whose references still need tracing.
work_list: VecDeque<NonNull<GcObject>>,
// Per-worker queues for parallel marking (unused by the code in this excerpt).
work_queues: Vec<Arc<WorkStealingQueue>>,
// Current phase of the incremental collection state machine.
phase: GCPhase,
stats: GCStats,
event_handlers: Vec<Box<dyn GCEventHandler>>,
// Objects processed per incremental step; retuned by the time budget.
incremental_step_size: usize,
// Young-generation size that triggers a minor collection.
young_gen_threshold: usize,
old_gen_threshold: usize,
// Old-to-young references recorded between collections.
remembered_set: Vec<RememberedSetEntry>,
allocator: Box<dyn Allocator>,
total_memory: usize,
used_memory: usize,
// Available hardware parallelism, sampled at construction.
parallelism: usize,
free_blocks: Vec<(usize, usize)>,
// Concurrent-collection support (threads are not started in this excerpt).
concurrent_enabled: bool,
concurrent_mark_thread: Option<thread::JoinHandle<()>>,
concurrent_sweep_thread: Option<thread::JoinHandle<()>>,
concurrent_mark_done: bool,
concurrent_sweep_done: bool,
// Adaptive step-time budgeting for incremental collection.
time_budget: TimeBudget,
promotion_policy: PromotionPolicy,
write_barrier: WriteBarrier,
pause_report: PauseTimeReport,
// Start of the incremental step currently in flight, if any.
step_start_time: Option<Instant>,
}
impl GC {
/// Creates a GC backed by the default allocation strategy.
pub fn new() -> Self {
Self::with_strategy(AllocationStrategy::Default)
}
/// Creates a GC backed by the given allocation strategy, sizing the
/// work-stealing queues to the available hardware parallelism.
pub fn with_strategy(strategy: AllocationStrategy) -> Self {
// Fall back to a single worker when parallelism cannot be queried.
let parallelism = thread::available_parallelism().unwrap_or(std::num::NonZeroUsize::new(1).unwrap()).get();
let mut work_queues = Vec::with_capacity(parallelism);
for _ in 0..parallelism {
work_queues.push(Arc::new(WorkStealingQueue::new()));
}
let allocator = AllocatorFactory::create(strategy);
Self {
roots: Vec::new(),
heap_objects: Vec::new(),
young_generation: Vec::new(),
old_generation: Vec::new(),
marked_objects: HashSet::new(),
work_list: VecDeque::new(),
work_queues,
phase: GCPhase::Idle,
stats: GCStats::default(),
event_handlers: Vec::new(),
incremental_step_size: 200,
young_gen_threshold: 800,
old_gen_threshold: 8000,
remembered_set: Vec::new(),
allocator,
total_memory: 0,
used_memory: 0,
parallelism,
free_blocks: Vec::new(),
concurrent_enabled: false,
concurrent_mark_thread: None,
concurrent_sweep_thread: None,
concurrent_mark_done: false,
concurrent_sweep_done: false,
time_budget: TimeBudget::new(),
promotion_policy: PromotionPolicy::new(),
write_barrier: WriteBarrier::new(),
pause_report: PauseTimeReport::new(),
step_start_time: None,
}
}
/// Convenience constructor using the buddy allocation strategy.
pub fn with_buddy_allocator() -> Self {
Self::with_strategy(AllocationStrategy::Buddy)
}
/// Swaps in a new allocator built from `strategy`.
///
/// NOTE(review): memory already handed out by the previous allocator is not
/// migrated, and `total_memory` / `used_memory` are reset from the fresh
/// allocator's stats while existing objects remain live — confirm this is
/// only ever called before any allocation has happened.
pub fn set_allocation_strategy(&mut self, strategy: AllocationStrategy) {
let new_allocator = AllocatorFactory::create(strategy);
self.allocator = new_allocator;
let stats = self.allocator.stats();
self.total_memory = stats.total_allocated;
self.used_memory = stats.total_used;
}
/// Sets how many objects an incremental step processes.
pub fn set_incremental_step_size(&mut self, size: usize) {
self.incremental_step_size = size;
}
/// Young-generation size (object count) that triggers a minor collection.
pub fn set_young_gen_threshold(&mut self, threshold: usize) {
self.young_gen_threshold = threshold;
}
/// Old-generation threshold (not consulted by the code in this excerpt).
pub fn set_old_gen_threshold(&mut self, threshold: usize) {
self.old_gen_threshold = threshold;
}
/// Installs a time budget and aligns the pause report's target with it.
pub fn set_time_budget(&mut self, budget: TimeBudget) {
self.time_budget = budget;
self.pause_report.set_target_pause(self.time_budget.target_max_pause_us);
}
/// Read-only access to the time budget.
pub fn time_budget(&self) -> &TimeBudget {
&self.time_budget
}
/// Mutable access to the time budget.
pub fn time_budget_mut(&mut self) -> &mut TimeBudget {
&mut self.time_budget
}
/// Replaces the promotion policy.
pub fn set_promotion_policy(&mut self, policy: PromotionPolicy) {
self.promotion_policy = policy;
}
/// Read-only access to the promotion policy.
pub fn promotion_policy(&self) -> &PromotionPolicy {
&self.promotion_policy
}
/// Mutable access to the promotion policy.
pub fn promotion_policy_mut(&mut self) -> &mut PromotionPolicy {
&mut self.promotion_policy
}
/// Switches the write-barrier mechanism.
pub fn set_write_barrier_type(&mut self, barrier_type: WriteBarrierType) {
self.write_barrier.set_barrier_type(barrier_type);
}
/// Read-only access to the write barrier.
pub fn write_barrier(&self) -> &WriteBarrier {
&self.write_barrier
}
/// Mutable access to the write barrier.
pub fn write_barrier_mut(&mut self) -> &mut WriteBarrier {
&mut self.write_barrier
}
/// Read-only access to the pause-time report.
pub fn pause_report(&self) -> &PauseTimeReport {
&self.pause_report
}
/// Mutable access to the pause-time report.
pub fn pause_report_mut(&mut self) -> &mut PauseTimeReport {
&mut self.pause_report
}
/// Registers a GC event handler; handlers are notified in insertion order.
pub fn add_event_handler<H: GCEventHandler + 'static>(&mut self, handler: Box<H>) {
self.event_handlers.push(handler);
}
// Fans a phase-start event out to every registered handler.
fn notify_gc_start(&self, phase: GCPhase) {
for handler in &self.event_handlers {
handler.on_gc_start(phase);
}
}
// Fans a phase-end event (with current stats) out to every handler.
fn notify_gc_end(&self, phase: GCPhase) {
for handler in &self.event_handlers {
handler.on_gc_end(phase, &self.stats);
}
}
// Notifies every handler that an object of `size` bytes was reclaimed.
fn notify_object_collected(&self, size: usize) {
for handler in &self.event_handlers {
handler.on_object_collected(size);
}
}
// Requests `size` bytes from the allocator, updating the usage counters on
// success. Returns None when the allocator cannot satisfy the request.
fn allocate_memory(&mut self, size: usize) -> Option<*mut u8> {
if let Some(ptr) = self.allocator.allocate(size) {
self.used_memory += size;
self.total_memory = self.allocator.stats().total_allocated;
Some(ptr.as_ptr())
}
else {
None
}
}
// Returns `size` bytes at `ptr` to the allocator (no-op for null `ptr`).
// NOTE(review): `used_memory -= size` will underflow (panicking in debug
// builds) if `size` exceeds the accounted usage — confirm callers always
// pass the original allocation size.
fn free_memory(&mut self, ptr: *mut u8, size: usize) {
if let Some(non_null_ptr) = NonNull::new(ptr) {
self.allocator.deallocate(non_null_ptr, size);
self.used_memory -= size;
self.total_memory = self.allocator.stats().total_allocated;
}
}
/// Allocates a GC-managed object holding `value`.
///
/// Tries the configured allocator first; if it is exhausted, falls back to
/// leaking a `Box` from the Rust heap. In both cases the object is booked
/// into the heap and young generation, which may trigger a minor collection.
pub fn allocate(&mut self, value: TsValue, size: usize, type_id: u8) -> NonNull<GcObject> {
    let total_size = std::mem::size_of::<GcObject>() + size;
    self.stats.record_allocation(total_size);
    // Fast path: carve the object out of the managed allocator.
    if let Some(ptr) = self.allocate_memory(total_size) {
        let obj_ptr = ptr as *mut GcObject;
        unsafe {
            // SAFETY: `allocate_memory` returned at least `total_size` bytes,
            // which covers a `GcObject`; the pointer is freshly allocated and
            // therefore valid and unaliased for the write.
            obj_ptr.write(GcObject::new(value, size, type_id));
            let non_null_ptr = NonNull::new(obj_ptr).unwrap();
            self.register_young_object(non_null_ptr);
            return non_null_ptr;
        }
    }
    // Fallback: allocator exhausted — lease the object from the Rust heap.
    let obj = Box::new(GcObject::new(value, size, type_id));
    let ptr = NonNull::from(Box::leak(obj));
    self.register_young_object(ptr);
    ptr
}

/// Books a freshly allocated object into the heap and young generation,
/// triggering a minor collection once the young generation fills up.
/// (Extracted: this logic was previously duplicated in both `allocate` paths.)
fn register_young_object(&mut self, ptr: NonNull<GcObject>) {
    self.heap_objects.push(ptr);
    self.young_generation.push(ptr);
    if self.young_generation.len() >= self.young_gen_threshold {
        self.collect_young_gen();
    }
}
/// Registers a root object; bumps its ref count so the header reflects the
/// extra reference held by the root set.
pub fn add_root(&mut self, mut obj: NonNull<GcObject>) {
self.roots.push(obj);
unsafe {
// SAFETY: assumes `obj` points to a live GcObject managed by this GC.
obj.as_mut().header.increment_ref_count();
}
}
/// Unregisters a root (first occurrence only); the ref count is decremented
/// only when the root was actually present.
pub fn remove_root(&mut self, mut obj: NonNull<GcObject>) {
if let Some(pos) = self.roots.iter().position(|&r| r == obj) {
self.roots.remove(pos);
unsafe {
// SAFETY: same liveness assumption as `add_root`.
obj.as_mut().header.decrement_ref_count();
}
}
}
/// Runs a full, non-incremental collection cycle: mark, sweep, then compact,
/// notifying event handlers around each phase and recording stats plus a
/// pause-time record at the end.
pub fn collect(&mut self) {
let start_time = std::time::Instant::now();
self.notify_gc_start(GCPhase::Marking);
self.phase = GCPhase::Marking;
self.mark_all();
self.notify_gc_end(GCPhase::Marking);
self.notify_gc_start(GCPhase::Sweeping);
self.phase = GCPhase::Sweeping;
// `collected` is (freed_object_count, freed_byte_count).
let collected = self.sweep();
self.notify_gc_end(GCPhase::Sweeping);
self.notify_gc_start(GCPhase::Compacting);
self.phase = GCPhase::Compacting;
self.compact();
self.notify_gc_end(GCPhase::Compacting);
self.phase = GCPhase::Idle;
let duration = start_time.elapsed().as_micros() as u64;
self.stats.record_collection(duration, self.marked_objects.len(), collected.0, collected.1);
self.stats.update_fragmentation(self.used_memory, self.total_memory);
// NOTE(review): the pause record is tagged GCPhase::Idle (the phase after
// the cycle) rather than a collection phase — confirm this is intended.
let record =
PauseTimeRecord::with_details(start_time, Instant::now(), GCPhase::Idle, self.marked_objects.len(), collected.1);
self.pause_report.add_record(record);
self.marked_objects.clear();
self.remembered_set.clear();
}
/// Performs one incremental collection step using the adaptive time budget.
/// Returns true only when a full cycle has just completed.
pub fn collect_incremental(&mut self) -> bool {
self.collect_incremental_with_budget()
}
/// One step of the incremental collection state machine
/// (Idle -> Marking -> Sweeping -> Compacting -> Idle).
///
/// Each call runs at most one phase slice bounded by that phase's time
/// budget, feeds the observed timing back into the budget, and retunes the
/// incremental step size. Returns true only on the step that finishes the
/// Compacting phase (i.e. completes the cycle).
pub fn collect_incremental_with_budget(&mut self) -> bool {
let step_start = Instant::now();
self.step_start_time = Some(step_start);
let result = match self.phase {
GCPhase::Idle => {
// Start a new cycle: seed the mark work list from the roots.
self.notify_gc_start(GCPhase::Marking);
self.phase = GCPhase::Marking;
self.init_mark();
false
}
GCPhase::Marking => {
let budget = self.time_budget.mark_budget();
let done = self.mark_incremental_with_budget(budget);
// Feed actual-vs-budget timing back into the adaptive budget.
let elapsed = step_start.elapsed().as_micros() as u64;
self.time_budget.adapt(elapsed, budget);
self.incremental_step_size = self.time_budget.recommended_step_size(self.incremental_step_size);
if done {
self.notify_gc_end(GCPhase::Marking);
self.notify_gc_start(GCPhase::Sweeping);
self.phase = GCPhase::Sweeping;
self.init_sweep();
}
false
}
GCPhase::Sweeping => {
let budget = self.time_budget.sweep_budget();
let (done, objects_processed, bytes_collected) = self.sweep_incremental_with_budget(budget);
let elapsed = step_start.elapsed().as_micros() as u64;
self.time_budget.adapt(elapsed, budget);
self.incremental_step_size = self.time_budget.recommended_step_size(self.incremental_step_size);
if done {
self.notify_gc_end(GCPhase::Sweeping);
self.notify_gc_start(GCPhase::Compacting);
self.phase = GCPhase::Compacting;
// Record a pause for the slice that finished the sweep phase.
let record = PauseTimeRecord::with_details(
self.step_start_time.unwrap_or(step_start),
Instant::now(),
GCPhase::Sweeping,
objects_processed,
bytes_collected,
);
self.pause_report.add_record(record);
}
false
}
GCPhase::Compacting => {
let budget = self.time_budget.compact_budget();
let done = self.compact_incremental_with_budget(budget);
let elapsed = step_start.elapsed().as_micros() as u64;
self.time_budget.adapt(elapsed, budget);
self.incremental_step_size = self.time_budget.recommended_step_size(self.incremental_step_size);
if done {
// Cycle complete: reset per-cycle state and report the pause.
self.notify_gc_end(GCPhase::Compacting);
self.phase = GCPhase::Idle;
self.marked_objects.clear();
self.remembered_set.clear();
let record = PauseTimeRecord::new(GCPhase::Compacting, elapsed);
self.pause_report.add_record(record);
return true;
}
false
}
// Concurrent phases are driven elsewhere; incremental stepping is a no-op.
GCPhase::ConcurrentMarking => false,
GCPhase::ConcurrentSweeping => false,
};
self.step_start_time = None;
result
}
pub fn collect_young_gen(&mut self) {
let start_time = std::time::Instant::now();
self.marked_objects.clear();
self.work_list.clear();
let roots: Vec<_> = self.roots.iter().copied().collect();
for root in roots {
self.mark_object(root);
}
let remembered_set_copy = self.remembered_set.clone();
for entry in &remembered_set_copy {
self.mark_object(entry.old_obj);
}
while let Some(obj) = self.work_list.pop_front() {
self.process_object_references(obj);
}
let mut collected_count = 0;
let mut collected_bytes = 0;
let mut survivors = Vec::new();
let mut promoted_count = 0;
let mut promoted_bytes = 0;
for &obj in &self.young_generation {
let ptr = obj.as_ptr() as usize;
unsafe {
if self.marked_objects.contains(&ptr) {
(*obj.as_ptr()).header.increment_age();
if self.should_promote(&(*obj.as_ptr())) {
self.old_generation.push(obj);
promoted_count += 1;
promoted_bytes += (*obj.as_ptr()).total_size();
}
else {
survivors.push(obj);
}
(*obj.as_ptr()).header.mark = false;
}
else {
collected_count += 1;
collected_bytes += (*obj.as_ptr()).total_size();
self.notify_object_collected((*obj.as_ptr()).total_size());
self.heap_objects.retain(|&o| o != obj);
let _ = Box::from_raw(obj.as_ptr());
}
}
}
self.young_generation = survivors;
let survivor_count = self.young_generation.len();
let total_young = survivor_count + collected_count + promoted_count;
let survival_rate = if total_young > 0 { survivor_count as f64 / total_young as f64 } else { 0.0 };
self.promotion_policy.record_survival_rate(survival_rate);
let duration = start_time.elapsed().as_micros() as u64;
self.stats.record_young_gen_collection(
duration,
self.marked_objects.len(),
collected_count,
collected_bytes,
promoted_count,
promoted_bytes,
);
self.stats.update_fragmentation(self.used_memory, self.total_memory);
println!(
"Young gen GC: collected={}, promoted={}, survivors={}, time={}us",
collected_count,
promoted_count,
self.young_generation.len(),
duration
);
let record = PauseTimeRecord::with_details(
start_time,
Instant::now(),
GCPhase::Marking,
self.marked_objects.len(),
collected_bytes,
);
self.pause_report.add_record(record);
self.marked_objects.clear();
self.remembered_set.clear();
}
// Delegates the promotion decision to the configured policy.
fn should_promote(&self, obj: &GcObject) -> bool {
self.promotion_policy.should_promote(&obj.header)
}
// Resets mark state and seeds the work list with all root objects.
fn init_mark(&mut self) {
self.marked_objects.clear();
self.work_list.clear();
let roots: Vec<_> = self.roots.iter().copied().collect();
for root in roots {
self.mark_object(root);
}
}
// Legacy step-count-based incremental marking (the budgeted variant is the
// one driven by `collect_incremental_with_budget`). Returns true when the
// work list is fully drained.
fn mark_incremental(&mut self) -> bool {
let steps = self.incremental_step_size;
for _ in 0..steps {
if let Some(obj) = self.work_list.pop_front() {
self.process_object_references(obj);
}
else {
return true;
}
}
false
}
/// Drains the mark work list until the time budget expires. Returns true
/// once the work list is empty (marking complete), false when the budget ran
/// out first. (The previous version kept a `processed` counter that was
/// incremented but never read — removed.)
fn mark_incremental_with_budget(&mut self, budget_us: u64) -> bool {
    let start = Instant::now();
    let budget = Duration::from_micros(budget_us);
    // The budget is only checked between objects, so one slow object can
    // overrun it slightly.
    while start.elapsed() < budget {
        match self.work_list.pop_front() {
            Some(obj) => self.process_object_references(obj),
            // Work list drained: marking is done.
            None => return true,
        }
    }
    false
}
// Full marking pass selected for large heaps.
//
// NOTE(review): despite the name this runs entirely on the calling thread —
// the work-stealing queues are not used here. It also traverses each root's
// references twice (once directly below, once when the work list seeded by
// `init_mark` is drained); marking is idempotent, so this looks like
// redundant work rather than a correctness issue — confirm before relying on it.
fn mark_parallel(&mut self) {
self.init_mark();
let roots = self.roots.clone();
for root in roots {
self.process_object_references(root);
}
while let Some(obj) = self.work_list.pop_front() {
self.process_object_references(obj);
}
}
// Marks one object through the shared marked-set and recurses into the
// values it references. Intended for parallel marking workers; note that
// `work_queues` is only threaded through to the recursion here, not used
// directly.
fn process_object_in_parallel(
obj: NonNull<GcObject>,
work_queues: &[Arc<WorkStealingQueue>],
marked_objects: &Arc<Mutex<&mut HashSet<usize>>>,
) {
let ptr = obj.as_ptr() as usize;
{
// Keep the lock scope tight: test-and-insert, then release the lock
// before touching the object or recursing.
let mut marked = marked_objects.lock().unwrap();
if marked.contains(&ptr) {
return;
}
marked.insert(ptr);
}
unsafe {
// SAFETY: assumes `obj` points to a live GcObject owned by the GC —
// callers must not pass freed pointers.
(*obj.as_ptr()).header.mark = true;
}
unsafe {
match &(*obj.as_ptr()).value {
TsValue::Object(props) => {
for (_, value) in props {
Self::mark_value_references_parallel(value, work_queues, marked_objects);
}
}
TsValue::Array(elements) => {
for elem in elements {
Self::mark_value_references_parallel(elem, work_queues, marked_objects);
}
}
TsValue::Function(_) => {}
_ => {}
}
}
}
/// Depth-first walk over every `TsValue` nested inside `value`, used by the
/// parallel marking path. Composite variants recurse into their children;
/// scalar values are leaves and are ignored.
fn mark_value_references_parallel(
    value: &TsValue,
    work_queues: &[Arc<WorkStealingQueue>],
    marked_objects: &Arc<Mutex<&mut HashSet<usize>>>,
) {
    // Single recursion entry point shared by all the arms below.
    let recurse = |v: &TsValue| Self::mark_value_references_parallel(v, work_queues, marked_objects);
    match value {
        TsValue::Promise(inner) => recurse(inner),
        TsValue::Object(props) => props.iter().for_each(|(_, nested)| recurse(nested)),
        TsValue::Array(items) => items.iter().for_each(|item| recurse(item)),
        TsValue::Union(members) => members.iter().for_each(|member| recurse(member)),
        TsValue::Generic(_, type_args) => type_args.iter().for_each(|arg| recurse(arg)),
        TsValue::Map(pairs) => pairs.iter().for_each(|(key, val)| {
            recurse(key);
            recurse(val);
        }),
        TsValue::Set(members) => members.iter().for_each(|member| recurse(member)),
        // Functions and scalar values carry no traceable references.
        TsValue::Function(_) => {}
        _ => {}
    }
}
// Marks every reachable object. The "parallel" path is chosen for large
// heaps, though it currently runs sequentially (see `mark_parallel`).
fn mark_all(&mut self) {
if self.parallelism > 1 && self.heap_objects.len() > 1000 {
self.mark_parallel();
}
else {
self.init_mark();
while let Some(obj) = self.work_list.pop_front() {
self.process_object_references(obj);
}
}
}
// Marks a single object (idempotent) and queues it for reference tracing.
fn mark_object(&mut self, obj: NonNull<GcObject>) {
let ptr = obj.as_ptr() as usize;
if self.marked_objects.contains(&ptr) {
return;
}
self.marked_objects.insert(ptr);
unsafe {
// SAFETY: assumes `obj` points to a live GcObject owned by this GC.
(*obj.as_ptr()).header.mark = true;
}
self.work_list.push_back(obj);
}
// Traces the values referenced by `obj`'s payload. Only Object and Array
// payloads are walked at this level; nested values are handled by
// `mark_value_references`.
fn process_object_references(&mut self, obj: NonNull<GcObject>) {
unsafe {
match &(*obj.as_ptr()).value {
TsValue::Object(props) => {
for (_, value) in props {
self.mark_value_references(value);
}
}
TsValue::Array(elements) => {
for elem in elements {
self.mark_value_references(elem);
}
}
TsValue::Function(_) => {}
_ => {}
}
}
}
/// Depth-first traversal of every `TsValue` nested inside `value`.
/// Composite variants recurse into their children; scalars are leaves.
fn mark_value_references(&mut self, value: &TsValue) {
    match value {
        TsValue::Promise(inner) => self.mark_value_references(inner),
        TsValue::Object(props) => {
            props.iter().for_each(|(_, nested)| self.mark_value_references(nested));
        }
        TsValue::Array(items) => {
            items.iter().for_each(|item| self.mark_value_references(item));
        }
        TsValue::Union(members) => {
            members.iter().for_each(|member| self.mark_value_references(member));
        }
        TsValue::Generic(_, type_args) => {
            type_args.iter().for_each(|arg| self.mark_value_references(arg));
        }
        TsValue::Map(pairs) => {
            pairs.iter().for_each(|(key, val)| {
                self.mark_value_references(key);
                self.mark_value_references(val);
            });
        }
        TsValue::Set(members) => {
            members.iter().for_each(|member| self.mark_value_references(member));
        }
        // Functions and scalar values carry no traceable references.
        TsValue::Function(_) => {}
        _ => {}
    }
}
// Placeholder: no per-cycle sweep state is prepared yet.
fn init_sweep(&mut self) {}
// Legacy fixed-batch incremental sweep (the budgeted variant is the one
// driven by `collect_incremental_with_budget`). Frees unmarked objects found
// in the first BATCH_SIZE entries of `heap_objects` and returns true when
// everything has been processed.
//
// NOTE(review): every call rescans from the front of `heap_objects`, so
// marked survivors are re-examined on each call, and the completion test
// `_processed >= heap_objects.len()` compares against the rebuilt list —
// with more than BATCH_SIZE survivors this may never report completion.
// Verify termination before using this path.
fn sweep_incremental(&mut self) -> bool {
const BATCH_SIZE: usize = 150;
let mut _processed = 0;
let mut collected_count = 0;
let mut collected_bytes = 0;
let mut survivors = Vec::new();
for (i, &obj) in self.heap_objects.iter().enumerate() {
if i >= BATCH_SIZE {
break;
}
let ptr = obj.as_ptr() as usize;
unsafe {
if self.marked_objects.contains(&ptr) {
// Survivor: clear the mark bit for the next cycle.
(*obj.as_ptr()).header.mark = false;
survivors.push(obj);
}
else {
collected_count += 1;
collected_bytes += (*obj.as_ptr()).total_size();
self.notify_object_collected((*obj.as_ptr()).total_size());
// SAFETY: the object is unmarked (unreachable) and is dropped here;
// it is excluded from the rebuilt heap list below.
let _ = Box::from_raw(obj.as_ptr());
}
}
_processed += 1;
}
if _processed > 0 {
// Rebuild: survivors from this batch + the untouched tail.
let remaining: Vec<_> = self.heap_objects.iter().skip(_processed).copied().collect();
self.heap_objects = survivors;
self.heap_objects.extend(remaining);
// Drop unmarked entries from the generation lists as well (their
// memory may only be freed by a later batch).
self.young_generation.retain(|&obj| {
let ptr = obj.as_ptr() as usize;
self.marked_objects.contains(&ptr)
});
self.old_generation.retain(|&obj| {
let ptr = obj.as_ptr() as usize;
self.marked_objects.contains(&ptr)
});
let duration = 0;
self.stats.record_collection(duration, self.marked_objects.len(), collected_count, collected_bytes);
}
_processed >= self.heap_objects.len()
}
// Time-budgeted incremental sweep: frees unmarked objects starting from the
// front of `heap_objects` until the budget expires. Returns
// (done, objects_processed, bytes_collected).
//
// NOTE(review): like `sweep_incremental`, every call restarts from the front
// of the rebuilt list, so marked survivors are re-examined on each call and
// `done` only becomes true when a single call processes the whole remaining
// list within its budget — verify this converges for heaps with many
// survivors.
fn sweep_incremental_with_budget(&mut self, budget_us: u64) -> (bool, usize, usize) {
let start = Instant::now();
let budget = Duration::from_micros(budget_us);
let mut processed = 0;
let mut collected_count = 0;
let mut collected_bytes = 0;
let mut survivors = Vec::new();
for &obj in &self.heap_objects {
// The budget is only checked between objects, so one slow object can
// overrun it slightly.
if start.elapsed() >= budget {
break;
}
let ptr = obj.as_ptr() as usize;
unsafe {
if self.marked_objects.contains(&ptr) {
// Survivor: clear the mark bit for the next cycle.
(*obj.as_ptr()).header.mark = false;
survivors.push(obj);
}
else {
collected_count += 1;
collected_bytes += (*obj.as_ptr()).total_size();
self.notify_object_collected((*obj.as_ptr()).total_size());
// SAFETY: the object is unmarked (unreachable) and is dropped here;
// it is excluded from the rebuilt heap list below.
let _ = Box::from_raw(obj.as_ptr());
}
}
processed += 1;
}
if processed > 0 {
// Rebuild: survivors from the processed prefix + the untouched tail.
let remaining: Vec<_> = self.heap_objects.iter().skip(processed).copied().collect();
self.heap_objects = survivors;
self.heap_objects.extend(remaining);
self.young_generation.retain(|&obj| {
let ptr = obj.as_ptr() as usize;
self.marked_objects.contains(&ptr)
});
self.old_generation.retain(|&obj| {
let ptr = obj.as_ptr() as usize;
self.marked_objects.contains(&ptr)
});
self.stats.record_collection(
start.elapsed().as_micros() as u64,
self.marked_objects.len(),
collected_count,
collected_bytes,
);
}
let done = processed >= self.heap_objects.len();
(done, processed, collected_bytes)
}
// Sweep path chosen for large heaps; returns (freed_count, freed_bytes).
//
// NOTE(review): despite the name this currently runs sequentially on the
// calling thread — no work is distributed to the work-stealing queues.
fn sweep_parallel(&mut self) -> (usize, usize) {
let mut collected_count = 0;
let mut collected_bytes = 0;
let mut survivors = Vec::new();
for &obj in &self.heap_objects {
let ptr = obj.as_ptr() as usize;
unsafe {
if self.marked_objects.contains(&ptr) {
// Survivor: clear the mark bit for the next cycle.
(*obj.as_ptr()).header.mark = false;
survivors.push(obj);
}
else {
collected_count += 1;
collected_bytes += (*obj.as_ptr()).total_size();
self.notify_object_collected((*obj.as_ptr()).total_size());
// SAFETY: the object is unmarked (unreachable); it is dropped here
// and excluded from the rebuilt lists below.
let _ = Box::from_raw(obj.as_ptr());
}
}
}
self.heap_objects = survivors;
self.young_generation.retain(|&obj| {
let ptr = obj.as_ptr() as usize;
self.marked_objects.contains(&ptr)
});
self.old_generation.retain(|&obj| {
let ptr = obj.as_ptr() as usize;
self.marked_objects.contains(&ptr)
});
(collected_count, collected_bytes)
}
/// Sweeps the heap, freeing unmarked objects and clearing mark bits on
/// survivors. Returns `(objects_freed, bytes_freed)`.
///
/// The sequential else-branch here used to be a line-for-line copy of
/// `sweep_parallel`'s body (which itself currently runs on the calling
/// thread), so both paths now share that single implementation instead of
/// duplicating the unsafe free logic. The old `parallelism > 1 &&
/// len > 1000` gate selected between two identical code paths, so dropping
/// it changes no behavior.
fn sweep(&mut self) -> (usize, usize) {
    self.sweep_parallel()
}
// Compacts the heap: assigns every tracked object a new, densely packed
// address, rewrites references to relocated objects, then moves them.
//
// NOTE(review): `current_address` starts at 0 and `move_objects` uses these
// offsets as ABSOLUTE destination pointers, so the first object would be
// copied to address 0. Unless a heap base is applied somewhere not visible
// in this chunk, this is almost certainly undefined behaviour — confirm how
// these addresses are meant to be interpreted before enabling compaction.
fn compact(&mut self) {
if self.heap_objects.is_empty() {
return;
}
// Plan: give each object a dense offset in iteration order.
let mut new_addresses = Vec::with_capacity(self.heap_objects.len());
let mut current_address = 0;
for &obj in &self.heap_objects {
unsafe {
let size = (*obj.as_ptr()).total_size();
new_addresses.push((obj, current_address));
current_address += size;
}
}
// Old address -> new address, for reference rewriting.
let mut address_map = std::collections::HashMap::new();
for (obj, new_addr) in &new_addresses {
let old_addr = obj.as_ptr() as usize;
address_map.insert(old_addr, *new_addr);
}
self.update_references(&address_map);
self.move_objects(&new_addresses);
// A fully compacted heap is contiguous, so no free blocks remain.
self.free_blocks.clear();
}
// Performs one bounded compaction step, relocating up to BATCH_SIZE objects.
// Returns `true` while objects remain in the heap list, `false` once this
// call empties it.
//
// NOTE(review): two problems to confirm with the author. (1) The relocated
// batch is removed from `heap_objects` outright, so the moved copies are no
// longer tracked by the collector and will never be swept — combined with
// the absolute-address issue flagged on `move_objects`, incremental
// compaction looks unusable as-is. (2) The return convention is
// inconsistent: an already-empty heap returns `true`, while emptying the
// heap in this call returns `false`.
fn compact_incremental(&mut self) -> bool {
if self.heap_objects.is_empty() {
return true;
}
const BATCH_SIZE: usize = 100;
let batch = self.heap_objects.iter().take(BATCH_SIZE).copied().collect::<Vec<_>>();
if batch.is_empty() {
return true;
}
// Assign each batched object a dense offset starting at 0.
let mut new_addresses = Vec::with_capacity(batch.len());
let mut current_address = 0;
for &obj in &batch {
unsafe {
let size = (*obj.as_ptr()).total_size();
new_addresses.push((obj, current_address));
current_address += size;
}
}
// Old address -> new address, for reference rewriting.
let mut address_map = std::collections::HashMap::new();
for (obj, new_addr) in &new_addresses {
let old_addr = obj.as_ptr() as usize;
address_map.insert(old_addr, *new_addr);
}
self.update_references(&address_map);
self.move_objects(&new_addresses);
// Drop the processed batch from the tracking list (see note above).
self.heap_objects = self.heap_objects.iter().skip(BATCH_SIZE).copied().collect();
!self.heap_objects.is_empty()
}
// Time-budgeted variant of `compact_incremental`: plans relocations until
// `budget_us` microseconds elapse, then applies them. Returns `true` while
// objects remain in the heap list.
//
// NOTE(review): shares the problems flagged on `compact_incremental` and
// `move_objects` (offsets from 0 used as absolute pointers; relocated
// objects dropped from tracking). Additionally, if the budget expires before
// anything is planned, the `new_addresses.is_empty()` early return yields
// `true` — which reads as "done" even though nothing was compacted. Confirm
// what the caller expects from the return value.
fn compact_incremental_with_budget(&mut self, budget_us: u64) -> bool {
if self.heap_objects.is_empty() {
return true;
}
let start = std::time::Instant::now();
let budget = std::time::Duration::from_micros(budget_us);
let mut processed = 0;
let mut new_addresses = Vec::new();
let mut current_address = 0;
for &obj in &self.heap_objects {
if start.elapsed() >= budget {
break;
}
unsafe {
let size = (*obj.as_ptr()).total_size();
new_addresses.push((obj, current_address));
current_address += size;
}
processed += 1;
}
if new_addresses.is_empty() {
return true;
}
// Old address -> new address, for reference rewriting.
let mut address_map = std::collections::HashMap::new();
for (obj, new_addr) in &new_addresses {
let old_addr = obj.as_ptr() as usize;
address_map.insert(old_addr, *new_addr);
}
self.update_references(&address_map);
self.move_objects(&new_addresses);
if processed > 0 {
self.heap_objects = self.heap_objects.iter().skip(processed).copied().collect();
}
!self.heap_objects.is_empty()
}
/// Rewrites every reference the collector knows about according to
/// `address_map` (old address -> new address), as part of compaction.
fn update_references(&mut self, address_map: &std::collections::HashMap<usize, usize>) {
    // Rewrite root pointers that refer to relocated objects.
    // `NonNull::new` is a safe function, so no `unsafe` block is needed
    // here (the original wrapped it in one unnecessarily).
    for root in &mut self.roots {
        let old_addr = root.as_ptr() as usize;
        if let Some(&new_addr) = address_map.get(&old_addr) {
            // NOTE(review): compaction offsets start at 0 (see `compact`),
            // so the object relocated to offset 0 would make this panic —
            // confirm the address scheme before relying on this path.
            *root = NonNull::new(new_addr as *mut GcObject)
                .expect("relocated object address must be non-null");
        }
    }
    // Rewrite references held inside object payloads. Delegating to
    // `update_value_references` covers every container variant (Object,
    // Array, Union, Generic, Map, Set, Promise); the previous inline match
    // handled only Object and Array, silently skipping references nested in
    // the other variants. Mutation goes through the raw pointer, so the
    // previous full clone of `heap_objects` was unnecessary.
    for &obj in &self.heap_objects {
        // SAFETY: every pointer in `heap_objects` refers to a live,
        // Box-allocated GcObject owned by this collector.
        unsafe {
            Self::update_value_references(&mut (*obj.as_ptr()).value, address_map);
        }
    }
}
/// Recursively walks a `TsValue` and rewrites nested references according to
/// `address_map`. Leaf variants carry no traceable references and fall
/// through to the no-op arm.
fn update_value_references(value: &mut TsValue, address_map: &std::collections::HashMap<usize, usize>) {
    match value {
        TsValue::Object(props) => props
            .iter_mut()
            .for_each(|(_, v)| Self::update_value_references(v, address_map)),
        TsValue::Array(items) => items
            .iter_mut()
            .for_each(|v| Self::update_value_references(v, address_map)),
        TsValue::Union(members) => members
            .iter_mut()
            .for_each(|v| Self::update_value_references(v, address_map)),
        TsValue::Generic(_, type_args) => type_args
            .iter_mut()
            .for_each(|v| Self::update_value_references(v, address_map)),
        TsValue::Map(pairs) => pairs.iter_mut().for_each(|(k, v)| {
            // Both the key and the value of a map entry may hold references.
            Self::update_value_references(k, address_map);
            Self::update_value_references(v, address_map);
        }),
        TsValue::Set(members) => members
            .iter_mut()
            .for_each(|v| Self::update_value_references(v, address_map)),
        TsValue::Promise(inner) => Self::update_value_references(inner, address_map),
        _ => {}
    }
}
// Copies each object's bytes to its planned destination and frees the old
// allocation.
//
// NOTE(review): `new_addr` is a dense offset starting at 0 (see `compact`),
// yet it is used here as an ABSOLUTE destination pointer for
// `copy_nonoverlapping` — writing to such low addresses is undefined
// behaviour. Furthermore the freed `Box` was the only tracked owner; the
// copy at `new_ptr` is untracked afterwards. This relocation scheme needs a
// real heap base / allocator integration before it can be enabled safely.
fn move_objects(&mut self, new_addresses: &[(NonNull<GcObject>, usize)]) {
for (obj, new_addr) in new_addresses {
unsafe {
let size = (*obj.as_ptr()).total_size();
let old_ptr = obj.as_ptr() as *mut u8;
let new_ptr = *new_addr as *mut u8;
std::ptr::copy_nonoverlapping(old_ptr, new_ptr, size);
// Free the original allocation; the copy is now the live version.
let _ = Box::from_raw(obj.as_ptr());
}
}
}
/// Records an old-generation -> young-generation reference: dirties the card
/// for the old object and remembers the edge so minor collections can treat
/// it as a root.
pub fn record_cross_generation_reference(&mut self, old_obj: NonNull<GcObject>, young_obj: NonNull<GcObject>) {
    self.write_barrier.mark_card_dirty(old_obj.as_ptr() as usize);
    let edge = RememberedSetEntry { old_obj, young_obj };
    // The barrier buffers entries; it only hands back a batch when it
    // decides to flush.
    let flushed = match self.write_barrier.add_remembered_entry(edge) {
        Some(entries) => entries,
        None => return,
    };
    self.remembered_set.extend(flushed);
    // Keep the remembered set from growing without bound.
    if self.remembered_set.len() > 1000 {
        self.compress_remembered_set();
    }
}
/// Drains the write barrier's batch buffer into the remembered set and
/// immediately deduplicates it.
pub fn flush_write_barrier_buffer(&mut self) {
    self.remembered_set
        .extend(self.write_barrier.flush_batch_buffer());
    self.compress_remembered_set();
}
/// Deduplicates remembered-set entries in place, keeping the first
/// occurrence of each `(old, young)` pair and preserving order.
fn compress_remembered_set(&mut self) {
    // Nothing to deduplicate with zero or one entry.
    if self.remembered_set.len() <= 1 {
        return;
    }
    let mut seen = std::collections::HashSet::with_capacity(self.remembered_set.len());
    // `HashSet::insert` returns false for a duplicate, so one hash lookup
    // per entry replaces the old contains-then-insert double lookup, and
    // `Vec::retain` removes duplicates in place instead of building a second
    // vector and conditionally swapping it in.
    self.remembered_set.retain(|entry| {
        seen.insert((entry.old_obj.as_ptr() as usize, entry.young_obj.as_ptr() as usize))
    });
}
/// Marks the young-side object of up to `batch_size` remembered-set entries
/// and removes those entries from the set. Returns how many were processed.
fn process_remembered_set_batch(&mut self, batch_size: usize) -> usize {
    // `drain` removes the processed prefix in one pass; the previous code
    // cloned the prefix and then rebuilt the entire vector with
    // skip/collect — two extra O(n) copies per batch.
    let take = batch_size.min(self.remembered_set.len());
    let batch: Vec<_> = self.remembered_set.drain(..take).collect();
    for entry in &batch {
        self.mark_object(entry.young_obj);
    }
    take
}
fn clean_remembered_set(&mut self) {
let mut valid_entries = Vec::with_capacity(self.remembered_set.len());
for entry in &self.remembered_set {
valid_entries.push(*entry);
}
self.remembered_set = valid_entries;
}
/// Aggregate collection statistics for this collector.
pub fn stats(&self) -> &GCStats {
&self.stats
}
/// Number of objects currently tracked on the heap.
pub fn heap_size(&self) -> usize {
self.heap_objects.len()
}
/// Number of objects currently in the young generation.
pub fn young_gen_size(&self) -> usize {
self.young_generation.len()
}
/// Number of objects currently in the old generation.
pub fn old_gen_size(&self) -> usize {
self.old_generation.len()
}
/// The collector's current phase.
pub fn phase(&self) -> GCPhase {
self.phase
}
/// Whether a collection should be triggered now.
///
/// True when either generation has grown past its threshold, or when more
/// than half of the available memory is in use. The memory-pressure
/// comparison was previously inverted (`< 0.5`), which requested collections
/// while the heap was mostly empty and suppressed them under real pressure.
pub fn should_collect(&self) -> bool {
    self.young_generation.len() >= self.young_gen_threshold
        || self.old_generation.len() >= self.old_gen_threshold
        || (self.total_memory > 0
            && (self.used_memory as f64 / self.total_memory as f64) > 0.5)
}
/// Returns `(used_memory, total_memory)` in bytes.
pub fn memory_usage(&self) -> (usize, usize) {
(self.used_memory, self.total_memory)
}
/// Enables or disables concurrent (background-thread) collection mode.
pub fn enable_concurrent(&mut self, enabled: bool) {
self.concurrent_enabled = enabled;
}
/// Whether concurrent collection mode is currently enabled.
pub fn is_concurrent_enabled(&self) -> bool {
self.concurrent_enabled
}
// Begins a "concurrent" mark cycle.
//
// NOTE(review): despite the name, this runs `mark_all` synchronously on the
// calling thread and then raises the done flag; no thread is spawned here.
// Confirm whether this is a placeholder for a real background implementation.
fn start_concurrent_mark(&mut self) {
self.phase = GCPhase::Marking;
self.mark_all();
self.concurrent_mark_done = true;
}
// Begins a "concurrent" sweep cycle.
//
// NOTE(review): like `start_concurrent_mark`, this runs the full sweep
// synchronously on the calling thread and then raises the done flag; no
// thread is spawned here.
fn start_concurrent_sweep(&mut self) {
self.phase = GCPhase::Sweeping;
self.sweep();
self.concurrent_sweep_done = true;
}
// Polls the background mark/sweep threads and advances the phase once a
// finished thread has been joined.
//
// NOTE(review): the thread handle is `take`n BEFORE the join result and the
// done flag are checked. If the join fails or the flag is unset, the handle
// is already gone and the phase can never leave the Concurrent* state —
// verify this cannot strand the collector.
fn check_concurrent_status(&mut self) {
if self.phase == GCPhase::ConcurrentMarking {
if let Some(thread) = self.concurrent_mark_thread.take() {
if thread.join().is_ok() && self.concurrent_mark_done {
self.phase = GCPhase::Marking;
self.concurrent_mark_done = false;
}
}
}
if self.phase == GCPhase::ConcurrentSweeping {
if let Some(thread) = self.concurrent_sweep_thread.take() {
if thread.join().is_ok() && self.concurrent_sweep_done {
self.phase = GCPhase::Sweeping;
self.concurrent_sweep_done = false;
}
}
}
}
// Copies the collector's bookkeeping state into a new instance.
//
// This is a private inherent method, not an implementation of the `Clone`
// trait. Note what is deliberately NOT carried over: event handlers (the
// copy starts with no subscribers) and the concurrent thread handles (set to
// `None`), plus `step_start_time` is reset.
//
// NOTE(review): callers are not visible in this chunk — presumably the copy
// is handed to the concurrent mark/sweep threads; confirm.
fn clone(&self) -> Self {
Self {
roots: self.roots.clone(),
heap_objects: self.heap_objects.clone(),
young_generation: self.young_generation.clone(),
old_generation: self.old_generation.clone(),
marked_objects: self.marked_objects.clone(),
work_list: self.work_list.clone(),
work_queues: self.work_queues.clone(),
phase: self.phase,
stats: self.stats.clone(),
event_handlers: Vec::new(), incremental_step_size: self.incremental_step_size,
young_gen_threshold: self.young_gen_threshold,
old_gen_threshold: self.old_gen_threshold,
remembered_set: self.remembered_set.clone(),
allocator: self.allocator.box_clone(),
total_memory: self.total_memory,
used_memory: self.used_memory,
parallelism: self.parallelism,
free_blocks: self.free_blocks.clone(),
concurrent_enabled: self.concurrent_enabled,
concurrent_mark_thread: None,
concurrent_sweep_thread: None,
concurrent_mark_done: self.concurrent_mark_done,
concurrent_sweep_done: self.concurrent_sweep_done,
time_budget: self.time_budget.clone(),
promotion_policy: self.promotion_policy.clone(),
write_barrier: self.write_barrier.clone(),
pause_report: self.pause_report.clone(),
step_start_time: None,
}
}
/// Renders a human-readable, multi-section report covering collection
/// statistics, pause times, and memory usage. Intended for diagnostics /
/// logging; the exact text is part of the observable output, so the format
/// string below must not be reworded casually.
pub fn generate_report(&self) -> String {
format!(
"GC Report:\n\
=== Statistics ===\n\
- Collection count: {}\n\
- Total collection time: {} us\n\
- Average collection time: {} us\n\
- Young gen collections: {}\n\
- Old gen collections: {}\n\
- Promoted objects: {}\n\
- Promoted bytes: {}\n\
\n\
=== Pause Time ===\n\
{}\n\
\n\
=== Memory ===\n\
- Heap objects: {}\n\
- Young generation: {}\n\
- Old generation: {}\n\
- Used memory: {} bytes\n\
- Total memory: {} bytes\n\
- Fragmentation: {:.2}%",
self.stats.collection_count,
self.stats.total_collection_time_us,
self.stats.average_collection_time_us(),
self.stats.young_gen_collections,
self.stats.old_gen_collections,
self.stats.promoted_objects,
self.stats.promoted_bytes,
self.pause_report.summary(),
self.heap_objects.len(),
self.young_generation.len(),
self.old_generation.len(),
self.used_memory,
self.total_memory,
self.stats.fragmentation_ratio * 100.0,
)
}
}
impl Default for GC {
/// Equivalent to `GC::new()`.
fn default() -> Self {
Self::new()
}
}
/// Marks `obj` (if not already marked) and traces the references held in its
/// payload. Used by the parallel marking workers; `work_queues` is forwarded
/// to the value tracer for whatever work distribution it performs.
pub fn process_object_in_parallel_helper(
    obj: NonNull<GcObject>,
    work_queues: &[Arc<WorkStealingQueue>],
    marked_objects: &Arc<Mutex<&mut HashSet<usize>>>,
) {
    let ptr = obj.as_ptr() as usize;
    {
        // `HashSet::insert` returns false if the value was already present,
        // so claim-or-bail happens in a single lock acquisition and a single
        // hash lookup (the old code did contains() followed by insert()).
        let mut marked = marked_objects.lock().unwrap();
        if !marked.insert(ptr) {
            // Another worker already claimed this object.
            return;
        }
    }
    // SAFETY: `obj` points to a live GcObject owned by the collector; only
    // the claiming worker (us, per the insert above) touches it here.
    unsafe {
        (*obj.as_ptr()).header.mark = true;
        // Trace the whole payload via the shared value walker so every
        // container variant (Object, Array, Union, Generic, Map, Set,
        // Promise) is followed — the previous inline match handled only
        // Object and Array, silently skipping references nested in the
        // other variants.
        mark_value_references_parallel_helper(&(*obj.as_ptr()).value, work_queues, marked_objects);
    }
}
/// Recursively walks `value`, tracing every nested `TsValue` reachable from
/// it during parallel marking. Leaf variants (including functions) hold no
/// traceable references and fall through to the no-op arm.
pub fn mark_value_references_parallel_helper(
    value: &TsValue,
    work_queues: &[Arc<WorkStealingQueue>],
    marked_objects: &Arc<Mutex<&mut HashSet<usize>>>,
) {
    // Local shorthand for the recursive call; captures the worker context.
    let recurse = |v: &TsValue| mark_value_references_parallel_helper(v, work_queues, marked_objects);
    match value {
        TsValue::Object(props) => props.iter().for_each(|(_, v)| recurse(v)),
        TsValue::Array(items) => items.iter().for_each(|v| recurse(v)),
        TsValue::Union(members) => members.iter().for_each(|v| recurse(v)),
        TsValue::Generic(_, type_args) => type_args.iter().for_each(|v| recurse(v)),
        TsValue::Map(pairs) => pairs.iter().for_each(|(k, v)| {
            // Both the key and the value of a map entry may hold references.
            recurse(k);
            recurse(v);
        }),
        TsValue::Set(members) => members.iter().for_each(|v| recurse(v)),
        TsValue::Promise(inner) => recurse(inner),
        _ => {}
    }
}