//! A simpler implementation of [::zerogc::CollectorContext]
//! that doesn't support multiple threads/contexts.
//!
//! In exchange, there is no locking :)
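//!
//! A rough usage sketch, assuming a collector front-end built on top of
//! this module (the `SimpleCollector` name and its methods here are
//! hypothetical, not part of this module):
//!
//! ```ignore
//! let collector = SimpleCollector::create();
//! // Only one context may ever exist per collector
//! // (enforced by `CollectionManager::has_existing_context`)
//! let mut ctx = collector.into_context();
//! // At a safepoint, control flow is handed to the collector,
//! // which runs `RawContext::trigger_safepoint` if needed
//! safepoint!(ctx, root);
//! ```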

use std::cell::{Cell, UnsafeCell, RefCell};
use std::mem::ManuallyDrop;
use std::fmt::{self, Debug, Formatter};
use std::marker::PhantomData;

use slog::{Logger, FnValue, trace, o};

use crate::{CollectorRef, ShadowStack, ContextState};
use crate::collector::RawCollectorImpl;

/// Manages coordination of garbage collections
///
/// This is factored out of the main code mainly due to
/// the differences between single-threaded and multi-threaded collection
pub struct CollectionManager<C: RawCollectorImpl> {
    /// Implicit collector ref
    _marker: PhantomData<CollectorRef<C>>,
    /// Access to the internal state
    state: RefCell<CollectorState>,
    /// Whether a collection is currently in progress
    ///
    /// Used for debugging only
    collecting: Cell<bool>,
    /// Sanity check to ensure there's only one context
    has_existing_context: Cell<bool>,
}
impl<C: RawCollectorImpl> super::sealed::Sealed for CollectionManager<C> {}
unsafe impl<C> super::CollectionManager<C> for CollectionManager<C>
    where C: RawCollectorImpl<Manager=Self, RawContext=RawContext<C>> {
    type Context = RawContext<C>;

    fn new() -> Self {
        assert!(!C::SYNC);
        CollectionManager {
            _marker: PhantomData,
            state: RefCell::new(CollectorState::new()),
            collecting: Cell::new(false),
            has_existing_context: Cell::new(false)
        }
    }
    #[inline]
    fn is_collecting(&self) -> bool {
        self.collecting.get()
    }
    #[inline]
    fn should_trigger_collection(&self) -> bool {
        /*
         * Unlike the sync context manager, we can assume
         * there is only a single thread.
         * Therefore we don't need to check for other threads
         * having a collection in progress when we're at a safepoint.
         *
         * If we were having a collection, control flow is already
         * taken over by the collector ;)
         */
        false
    }
    unsafe fn freeze_context(&self, context: &RawContext<C>) {
        debug_assert_eq!(context.state.get(), ContextState::Active);
        unimplemented!("Freezing single-threaded contexts")
    }
    unsafe fn unfreeze_context(&self, _context: &RawContext<C>) {
        // We can't freeze, so we sure can't unfreeze
        unreachable!("Can't unfreeze a single-threaded context")
    }

    fn prevent_collection<R>(_collector: &C, _func: impl FnOnce() -> R) -> R {
        unimplemented!("Preventing collections for non-sync collectors")
    }

    #[inline]
    unsafe fn free_context(_collector: &C, _context: *mut Self::Context) {
        assert!(!C::SYNC);
        // No extra work to do - automatic Drop handles everything
    }
}
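/// The context implementation for a single-threaded collector.
///
/// Since the manager above permits only one context per collector,
/// exactly one of these exists for any given collector.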
pub struct RawContext<C: RawCollectorImpl> {
    /// Since we're the only context, we should (logically)
    /// be the only owner.
    ///
    /// This is still an Arc for easier use alongside the
    /// thread-safe implementation
    pub(crate) collector: CollectorRef<C>,
    // NOTE: We are Send, not Sync
    pub(super) shadow_stack: UnsafeCell<ShadowStack<C>>,
    // TODO: Does the collector access this async?
    pub(super) state: Cell<ContextState>,
    logger: Logger
}
impl<C: RawCollectorImpl> Debug for RawContext<C> {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        f.debug_struct("RawContext")
            .field(
                "collector",
                &format_args!("{:p}", &self.collector)
            )
            .field(
                "shadow_stacks",
                // We're assuming this is valid....
                unsafe { &*self.shadow_stack.get() }
            )
            .field("state", &self.state.get())
            .finish()
    }
}
impl<C: RawCollectorImpl> super::sealed::Sealed for RawContext<C> {}
unsafe impl<C> super::RawContext<C> for RawContext<C>
    where C: RawCollectorImpl<RawContext=Self, Manager=CollectionManager<C>> {
    unsafe fn register_new(collector: &CollectorRef<C>) -> ManuallyDrop<Box<Self>> {
        assert!(!C::SYNC);
        // NOTE: Nosync collector must have only **ONE** context
        assert!(
            !collector.as_raw().manager().has_existing_context
                .replace(true),
            "Already created a context for the collector!"
        );
        // Assume ownership
        let collector = collector.clone_internal();
        let logger = collector.as_raw().logger().new(o!());
        let context = ManuallyDrop::new(Box::new(RawContext {
            logger: logger.clone(), collector,
            shadow_stack: UnsafeCell::new(ShadowStack {
                last: std::ptr::null_mut(),
            }),
            state: Cell::new(ContextState::Active)
        }));
        trace!(
            logger, "Initializing context";
            "ptr" => format_args!("{:p}", &**context),
        );
        context
    }
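    /// Trigger a collection at this safepoint.
    ///
    /// A sketch of the state transitions asserted below:
    /// `Active` -> `SafePoint { collection_id }` (while the collection runs)
    /// -> `Active`, with the manager's `collecting` flag set for the
    /// duration as a sanity check against reentrant collections.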
    #[cold]
    #[inline(never)]
    unsafe fn trigger_safepoint(&self) {
        /*
         * Begin a collection.
         *
         * Since we are the only collector we don't have
         * to worry about awaiting other threads stopping
         * at a safepoint.
         * This simplifies the implementation considerably.
         */
        assert!(!self.collector.as_raw().manager().collecting.replace(true));
        let collection_id = self.collector.as_raw().manager().state.borrow_mut()
            .next_pending_id();
        trace!(
            self.logger,
            "Creating collector";
            "id" => collection_id,
            "ctx_ptr" => format_args!("{:?}", self)
        );
        let shadow_stack = &*self.shadow_stack.get();
        let ptr = self as *const RawContext<C> as *mut RawContext<C>;
        // Change our state to mark we are now waiting at a safepoint
        assert_eq!(self.state.replace(ContextState::SafePoint {
            collection_id,
        }), ContextState::Active);
        trace!(
            self.logger, "Beginning collection";
            "ptr" => ?ptr,
            "shadow_stack" => FnValue(|_| format!("{:?}", shadow_stack.as_vec())),
            "state" => ?self.state,
            "collection_id" => collection_id,
            "original_size" => %self.collector.as_raw().allocated_size(),
        );
        self.collector.as_raw().perform_raw_collection(&[ptr]);
        assert_eq!(
            self.state.replace(ContextState::Active),
            ContextState::SafePoint { collection_id }
        );
        assert!(self.collector.as_raw().manager().collecting.replace(false));
    }

    #[inline]
    fn shadow_stack_ptr(&self) -> *mut ShadowStack<C> {
        self.shadow_stack.get()
    }

    #[inline]
    unsafe fn collector(&self) -> &C {
        self.collector.as_raw()
    }

    #[inline]
    fn state(&self) -> ContextState {
        self.state.get()
    }
}

// Pending collections

/// Keeps track of a pending collection (if any)
///
/// Unlike the thread-safe implementation, this doesn't need to be
/// held under a lock: it lives in a [RefCell], since only a single
/// thread ever accesses it.
pub struct CollectorState {
    next_pending_id: u64
}
impl CollectorState {
    pub fn new() -> Self {
        CollectorState {
            next_pending_id: 0
        }
    }
    fn next_pending_id(&mut self) -> u64 {
        let id = self.next_pending_id;
        self.next_pending_id = id.checked_add(1)
            .expect("Overflow of collection ids");
        id
    }
}
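
// An illustrative sanity check (a sketch, not part of the collector logic):
// pending ids are handed out sequentially, and `next_pending_id` panics on
// overflow rather than wrapping.
#[cfg(test)]
mod tests {
    use super::CollectorState;

    #[test]
    fn pending_ids_are_sequential() {
        let mut state = CollectorState::new();
        assert_eq!(state.next_pending_id(), 0);
        assert_eq!(state.next_pending_id(), 1);
        assert_eq!(state.next_pending_id(), 2);
    }
}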