// mockalloc — crate root (lib.rs)
1#![deny(missing_docs)]
2//! Mockalloc is a crate to allow testing code which uses the global allocator. It
3//! uses a probabilistic algorithm to detect and distinguish several kinds of
4//! allocation related bugs:
5//!
6//! - Memory leaks
7//! - Double frees
8//! - Invalid frees (bad pointer)
9//! - Invalid frees (bad size)
10//! - Invalid frees (bad alignment)
11//!
12//! Once a bug is detected, you can enable the `tracing` feature of this crate
13//! to collect detailed information about the problem including backtraces showing
14//! where memory was allocated and freed.
15//!
16//! In the case the memory was leaked, it is also possible to find a list of
17//! backtraces showing possibilities for where we expected the memory to be freed.
18//!
19//! Note: the `tracing` feature incurs a significant performance penalty. (Although it
20//! is significantly faster than running the code under `miri`). You should also be
21//! aware that backtraces are often less complete in release builds where many frames are
22//! optimized out.
23//!
24//! ## Usage
25//!
26//! Typical use involves enabling the `Mockalloc` allocator during tests, eg:
27//!
28//! ```rust
29//! #[cfg(test)]
30//! mod tests {
31//!     use std::alloc::System;
32//!     use mockalloc::Mockalloc;
33//!
34//!     #[global_allocator]
35//!     static ALLOCATOR: Mockalloc<System> = Mockalloc(System);
36//! }
37//! ```
38//!
39//! Once the allocator is enabled, there are several ways to use it in your tests.
40//!
41//! The easiest way is to use the `#[mockalloc::test]` attribute on your tests
42//! instead of the usual `#[test]` attribute:
43//!
44//! ```rust
45//!     #[mockalloc::test]
46//!     fn it_works() {
47//!         // Some code which uses the allocator
48//!     }
49//! ```
50//!
51//! The test will fail if any of the allocation bugs listed above are detected.
52//! The test will also fail with the `NoData` error if no allocations are detected
53//! so that you can be sure that the `Mockalloc` allocator is active.
54//!
55//! You can also use `mockalloc` to test a specific section of code for memory
56//! issues without checking the entire test using the `assert_allocs` function.
57//!
58//! The `#[mockalloc::test]` attribute in the prior example is simply a shorthand
59//! for:
60//!
61//! ```rust
62//!     #[test]
63//!     fn it_works() {
64//!         mockalloc::assert_allocs(|| {
65//!             // Some code which uses the allocator
66//!         });
67//!     }
68//! ```
69//!
70//! It is also possible to make more detailed assertions: for example you may want
71//! to assert that a piece of code performs a specific number of allocations. For
72//! this you can use the `record_allocs` function:
73//!
74//! ```rust
75//!     #[test]
76//!     fn it_works() {
77//!         let alloc_info = mockalloc::record_allocs(|| {
78//!             // Some code which uses the allocator
79//!         });
80//!
81//!         assert_eq!(alloc_info.num_allocs(), 2);
82//!
83//!         // This is what `assert_allocs` does internally:
84//!         alloc_info.result().unwrap()
85//!     }
86//! ```
87//!
88//! ## Limitations
89//!
90//! Allocations are tracked separately for each thread. This allows tests to be
91//! run in parallel, but it means that the library will report false positives
92//! if a pointer returned by an allocation on one thread is later freed by a
93//! different thread.
94//!
95//! When the `tracing` feature is disabled, the algorithm cannot detect where the
96//! bug is, it can only indicate what kind of bug is present.
97//!
98//! ## How it works
99//!
100//! The allocator does its tracking without allocating any memory itself. It
101//! uses a probabilistic algorithm which works by hashing various pieces of
102//! metadata about allocations and frees, and then accumulating these using
103//! a commutative operation so that the order does not affect the result.
104//!
105//! Depending on which of these accumulators returns to zero by the end of
106//! a region under test, different allocation bugs can be distinguished.
107//!
108//! The following metadata is hashed and accumulated:
109//!
110//! - Pointer
111//! - Size & Pointer
112//! - Alignment & Pointer
113//!
114//! In addition to tracking the total number of allocations and frees.
115//!
116//! We can detect memory leaks and double frees by looking for a difference
117//! between the total numbers of allocations and frees.
118//!
119//! Otherwise, if the pointer accumulator does not return to zero, we know that
120//! an invalid pointer was freed.
121//!
122//! Otherwise, we know the right pointers were freed, but maybe with the wrong
123//! size and/or alignment, which we can detect with the other two accumulators.
124//!
125//! If all accumulators returned to zero then we know everything is good.
126//!
127//! Each accumulator and hash is 128 bits to essentially eliminate the chance
128//! of a collision.
129
130use std::alloc::{GlobalAlloc, Layout};
131use std::cell::{Cell, RefCell};
132use std::thread_local;
133
134#[cfg(feature = "tracing")]
135/// Functionality for detailed tracing of allocations. Enabled with the
136/// `tracing` feature.
137pub mod tracing;
138
// Probably overkill, but performance isn't a huge concern.
//
// Murmur-style finalizer: alternating wrapping multiplies by large odd
// constants with xor-shifts, mixing every input bit into all 128 output bits.
fn hash_fn(p: usize) -> u128 {
    const PRIME1: u128 = 257343791756393576901679996513787191589;
    const PRIME2: u128 = 271053192961985756828288246809453504189;
    let mut h = (p as u128).wrapping_add(PRIME2).wrapping_mul(PRIME1);
    h ^= h >> 64;
    h = h.wrapping_mul(PRIME2);
    h ^= h >> 42;
    h = h.wrapping_mul(PRIME1);
    h ^= h >> 25;
    h
}
152
/// Per-thread accumulator state for the probabilistic allocation checker.
/// Reset by `start` and read out (with error classification) by `finish`.
#[derive(Default)]
struct LocalState {
    // Wrapping sum of hash(ptr) over allocs, minus the same over frees;
    // returns to zero iff the freed pointer set matches the allocated set.
    ptr_accum: u128,
    // Wrapping sum of hash(ptr) * hash(size); nonzero at the end indicates
    // a free with the right pointer but the wrong size.
    ptr_size_accum: u128,
    // Wrapping sum of hash(ptr) * hash(align); nonzero at the end indicates
    // a free with the right pointer but the wrong alignment.
    ptr_align_accum: u128,
    // Total successful allocations recorded.
    num_allocs: u64,
    // Total frees recorded.
    num_frees: u64,
    // Total bytes requested by allocations.
    mem_allocated: u64,
    // Total bytes released by frees.
    mem_freed: u64,
    // High-water mark of live memory (mem_allocated - mem_freed).
    peak_mem: u64,
    // Number of live allocations at the moment `peak_mem` was reached.
    peak_mem_allocs: u64,
    #[cfg(feature = "tracing")]
    tracing: tracing::TracingState,
}
167
168impl LocalState {
169    fn record_alloc(&mut self, ptr: *const u8, layout: Layout) {
170        if ptr.is_null() {
171            return;
172        }
173        let ptr_hash = hash_fn(ptr as usize);
174        let size_hash = hash_fn(layout.size());
175        let align_hash = hash_fn(layout.align());
176        self.ptr_accum = self.ptr_accum.wrapping_add(ptr_hash);
177        self.ptr_size_accum = self
178            .ptr_size_accum
179            .wrapping_add(ptr_hash.wrapping_mul(size_hash));
180        self.ptr_align_accum = self
181            .ptr_align_accum
182            .wrapping_add(ptr_hash.wrapping_mul(align_hash));
183        self.num_allocs += 1;
184        self.mem_allocated += layout.size() as u64;
185
186        if self.mem_allocated > self.mem_freed {
187            let mem_usage = self.mem_allocated - self.mem_freed;
188            if mem_usage > self.peak_mem {
189                self.peak_mem = mem_usage;
190                self.peak_mem_allocs = self.num_allocs.saturating_sub(self.num_frees);
191            }
192        }
193
194        #[cfg(feature = "tracing")]
195        self.tracing.record_alloc(ptr, layout);
196    }
197    fn record_free(&mut self, ptr: *const u8, layout: Layout) {
198        let ptr_hash = hash_fn(ptr as usize);
199        let size_hash = hash_fn(layout.size());
200        let align_hash = hash_fn(layout.align());
201        self.ptr_accum = self.ptr_accum.wrapping_sub(ptr_hash);
202        self.ptr_size_accum = self
203            .ptr_size_accum
204            .wrapping_sub(ptr_hash.wrapping_mul(size_hash));
205        self.ptr_align_accum = self
206            .ptr_align_accum
207            .wrapping_sub(ptr_hash.wrapping_mul(align_hash));
208        self.num_frees += 1;
209        self.mem_freed += layout.size() as u64;
210
211        #[cfg(feature = "tracing")]
212        self.tracing.record_free(ptr, layout);
213    }
214    fn start(&mut self) {
215        *self = Default::default();
216        #[cfg(feature = "tracing")]
217        self.tracing.start();
218    }
219
220    fn finish(&mut self) -> AllocInfo {
221        let result = if self.num_allocs > self.num_frees {
222            Err(AllocError::Leak)
223        } else if self.num_allocs < self.num_frees {
224            Err(AllocError::DoubleFree)
225        } else if self.num_allocs == 0 {
226            Err(AllocError::NoData)
227        } else if self.ptr_accum != 0 {
228            Err(AllocError::BadPtr)
229        } else {
230            match (self.ptr_size_accum != 0, self.ptr_align_accum != 0) {
231                (true, true) => Err(AllocError::BadLayout),
232                (true, false) => Err(AllocError::BadSize),
233                (false, true) => Err(AllocError::BadAlignment),
234                (false, false) => Ok(()),
235            }
236        };
237        AllocInfo {
238            result,
239            num_allocs: self.num_allocs,
240            num_frees: self.num_frees,
241            mem_allocated: self.mem_allocated,
242            mem_freed: self.mem_freed,
243            peak_mem: self.peak_mem,
244            peak_mem_allocs: self.peak_mem_allocs,
245            #[cfg(feature = "tracing")]
246            tracing: self.tracing.finish(),
247        }
248    }
249}
250
thread_local! {
    // True while a recording region is active on this thread. Also toggled
    // off temporarily by `with_local_state` while it holds the `LOCAL_STATE`
    // borrow, so re-entrant allocator calls are ignored.
    static ENABLED: Cell<bool> = Cell::new(false);
    // Accumulated allocation statistics for this thread's current recording.
    static LOCAL_STATE: RefCell<LocalState> = RefCell::new(LocalState::default());
}
255
/// Wraps an existing allocator to allow detecting allocation bugs.
/// You should use the `#[global_allocator]` attribute to activate
/// this allocator.
///
/// The wrapped allocator performs the real (de)allocations; this wrapper
/// only records metadata about each call while a recording is active.
pub struct Mockalloc<T: GlobalAlloc>(pub T);
260
261unsafe impl<T: GlobalAlloc> GlobalAlloc for Mockalloc<T> {
262    unsafe fn alloc(&self, layout: std::alloc::Layout) -> *mut u8 {
263        let ptr = self.0.alloc(layout);
264        with_local_state(|state| {
265            state.record_alloc(ptr, layout);
266        });
267        ptr
268    }
269
270    unsafe fn dealloc(&self, ptr: *mut u8, layout: std::alloc::Layout) {
271        with_local_state(|state| {
272            state.record_free(ptr, layout);
273        });
274        self.0.dealloc(ptr, layout);
275    }
276
277    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
278        // SAFETY: the caller must ensure that the `new_size` does not overflow.
279        // `layout.align()` comes from a `Layout` and is thus guaranteed to be valid.
280        let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
281        let new_ptr = self.0.realloc(ptr, layout, new_size);
282        with_local_state(|state| {
283            state.record_free(ptr, layout);
284            state.record_alloc(new_ptr, new_layout);
285        });
286        new_ptr
287    }
288
289    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
290        let ptr = self.0.alloc_zeroed(layout);
291        with_local_state(|state| {
292            state.record_alloc(ptr, layout);
293        });
294        ptr
295    }
296}
297
/// Types of allocation bug which can be detected by the allocator.
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub enum AllocError {
    /// No allocations were detected. Perhaps `Mockalloc` isn't enabled
    /// as the global allocator?
    NoData,
    /// There were more calls to `alloc` than to `dealloc`.
    Leak,
    /// There were more calls to `dealloc` than to `alloc`.
    DoubleFree,
    /// A pointer was passed to `dealloc` which was not previously
    /// returned by `alloc`.
    BadPtr,
    /// The size specified in a call to `dealloc` did not match that
    /// specified in the corresponding `alloc` call.
    BadSize,
    /// The alignment specified in a call to `dealloc` did not match that
    /// specified in the corresponding `alloc` call.
    BadAlignment,
    /// The size and alignment specified in a call to `dealloc` did not match
    /// those specified in the corresponding `alloc` call.
    BadLayout,
}
322
/// Captures information about the allocations performed by a region under
/// test. Produced by `record_allocs`; all fields are exposed through the
/// accessor methods below.
#[derive(Debug, Clone)]
pub struct AllocInfo {
    // Total successful allocations recorded.
    num_allocs: u64,
    // Total frees recorded.
    num_frees: u64,
    // Total bytes requested by allocations.
    mem_allocated: u64,
    // Total bytes released by frees.
    mem_freed: u64,
    // High-water mark of live memory during the recording.
    peak_mem: u64,
    // Number of live allocations at the time of peak memory usage.
    peak_mem_allocs: u64,
    // Classification of any allocation bug detected, or `Ok(())`.
    result: Result<(), AllocError>,
    #[cfg(feature = "tracing")]
    tracing: tracing::TracingInfo,
}
337
338impl AllocInfo {
339    /// Returns the total number of allocations performed.
340    pub fn num_allocs(&self) -> u64 {
341        self.num_allocs
342    }
343    /// Returns the total number of frees performed.
344    pub fn num_frees(&self) -> u64 {
345        self.num_frees
346    }
347    /// Returns the total number of frees performed.
348    pub fn num_leaks(&self) -> u64 {
349        self.num_allocs - self.num_frees
350    }
351    /// Returns the total amount of memory allocated.
352    pub fn mem_allocated(&self) -> u64 {
353        self.mem_allocated
354    }
355    /// Returns the total amount of memory leaked.
356    pub fn mem_leaked(&self) -> u64 {
357        self.mem_allocated - self.mem_freed
358    }
359    /// Returns the total amount of memory leaked.
360    pub fn mem_freed(&self) -> u64 {
361        self.mem_freed
362    }
363    /// Returns peak memory usage, not including any overhead used by the allocator.
364    pub fn peak_mem(&self) -> u64 {
365        self.peak_mem
366    }
367    /// Returns the number of active allocations during peak memory usage.
368    pub fn peak_mem_allocs(&self) -> u64 {
369        self.peak_mem_allocs
370    }
371    /// Returns an `Err(..)` result if any allocation bugs were detected.
372    pub fn result(&self) -> Result<(), AllocError> {
373        self.result.clone()
374    }
375    /// Returns the detailed trace of leaks and errors.
376    #[cfg(feature = "tracing")]
377    pub fn tracing(&self) -> &tracing::TracingInfo {
378        &self.tracing
379    }
380}
381
382struct AllocChecker(bool);
383
384impl AllocChecker {
385    fn new() -> Self {
386        LOCAL_STATE.with(|rc| rc.borrow_mut().start());
387        ENABLED.with(|c| {
388            assert!(!c.get(), "Mockalloc already recording");
389            c.set(true);
390        });
391        Self(true)
392    }
393    fn finish(mut self) -> AllocInfo {
394        self.0 = false;
395        ENABLED.with(|c| c.set(false));
396        LOCAL_STATE.with(|rc| rc.borrow_mut().finish())
397    }
398}
399
400impl Drop for AllocChecker {
401    fn drop(&mut self) {
402        if self.0 {
403            ENABLED.with(|c| c.set(false));
404            LOCAL_STATE.with(|rc| rc.borrow_mut().finish());
405        }
406    }
407}
408
409/// Records the allocations within a code block.
410pub fn record_allocs(f: impl FnOnce()) -> AllocInfo {
411    let checker = AllocChecker::new();
412    f();
413    checker.finish()
414}
415
416/// Records the allocations within a code block and asserts that no issues
417/// were detected.
418///
419/// No checks are performed if `miri` is detected, as we cannot collect
420/// allocation data in that case, and `miri` performs many of these
421/// checks already.
422///
423/// If the `tracing` feature is enabled and an error or leak is detected,
424/// this function also prints out the full trace to `stderr`.
425pub fn assert_allocs(f: impl FnOnce()) {
426    if cfg!(miri) {
427        f();
428    } else {
429        let info = record_allocs(f);
430        #[cfg(feature = "tracing")]
431        if info.result.is_err() {
432            eprintln!("# Mockalloc trace:\n\n{:#?}", info.tracing);
433        }
434        info.result.unwrap();
435    }
436}
437
438/// Returns `true` if allocations are currently being recorded, ie. if
439/// we're inside a call to `record_allocs`.
440pub fn is_recording() -> bool {
441    ENABLED.with(|c| c.get())
442}
443
// Runs `f` with mutable access to this thread's recording state, or does
// nothing if recording is not active.
//
// `ENABLED` is cleared for the duration of `f` so that any allocator calls
// made while `LOCAL_STATE` is mutably borrowed (e.g. if the recording
// callbacks themselves allocate) take the early-return path above instead
// of re-entering and hitting a `RefCell` double-borrow panic.
fn with_local_state(f: impl FnOnce(&mut LocalState)) {
    if !is_recording() {
        return;
    }
    ENABLED.with(|c| c.set(false));
    LOCAL_STATE.with(|rc| f(&mut rc.borrow_mut()));
    ENABLED.with(|c| c.set(true));
}
452
453pub use mockalloc_macros::test;
454
#[cfg(test)]
mod tests {
    use super::{is_recording, record_allocs, AllocError, Mockalloc};
    use std::alloc::{GlobalAlloc, Layout, System};
    use std::{cmp, mem, ptr};

    // Test allocator: delegates to `System`, except that while a recording
    // is active it never actually frees memory. This lets the tests below
    // simulate invalid `dealloc` requests (bad pointer/size/alignment,
    // double free) without committing real undefined behavior in `System`.
    struct LeakingAllocator(System);

    unsafe impl GlobalAlloc for LeakingAllocator {
        unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
            self.0.alloc_zeroed(layout)
        }

        unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
            if is_recording() {
                // While recording, emulate realloc as alloc + copy + (suppressed)
                // dealloc so the old block leaks instead of being truly freed.
                // SAFETY: the caller must ensure that the `new_size` does not overflow.
                // `layout.align()` comes from a `Layout` and is thus guaranteed to be valid.
                let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
                // SAFETY: the caller must ensure that `new_layout` is greater than zero.
                let new_ptr = self.alloc(new_layout);
                if !new_ptr.is_null() {
                    // SAFETY: the previously allocated block cannot overlap the newly allocated block.
                    // The safety contract for `dealloc` must be upheld by the caller.
                    ptr::copy_nonoverlapping(ptr, new_ptr, cmp::min(layout.size(), new_size));
                    self.dealloc(ptr, layout);
                }
                new_ptr
            } else {
                self.0.realloc(ptr, layout, new_size)
            }
        }

        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
            self.0.alloc(layout)
        }

        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
            // Intentionally leak while recording; see the comment below.
            if !is_recording() {
                self.0.dealloc(ptr, layout);
            }
        }
    }

    // We suppress calls to `dealloc` whilst recording so that our tests don't cause UB
    // when simulating bad requests to the allocator.
    #[global_allocator]
    static A: Mockalloc<LeakingAllocator> = Mockalloc(LeakingAllocator(System));

    // Performs a burst of balanced allocations: 32 boxes split across two
    // vectors, one of which (`b`) is dropped on return while `a` is handed
    // back to the caller.
    fn do_some_allocations() -> Vec<Box<i32>> {
        let mut a = Vec::new();
        let mut b = Vec::new();
        for i in 0..32 {
            let p = Box::new(i);
            if i % 2 == 0 {
                a.push(p);
            } else {
                b.push(p);
            }
        }
        a
    }

    #[test]
    fn it_works() {
        let alloc_info = record_allocs(|| {
            let _p = Box::new(42);
        });
        alloc_info.result().unwrap();
        assert_eq!(alloc_info.num_allocs(), 1);
        assert_eq!(alloc_info.num_frees(), 1);
        // A single Box<i32>: 4 bytes live at peak, one active allocation.
        assert_eq!(alloc_info.peak_mem(), 4);
        assert_eq!(alloc_info.peak_mem_allocs(), 1);
    }

    #[test]
    fn it_detects_leak() {
        // `mem::forget` suppresses the drop, so the Box is never freed.
        let alloc_info = record_allocs(|| {
            mem::forget(Box::new(42));
        });
        assert_eq!(alloc_info.result().unwrap_err(), AllocError::Leak);
        assert_eq!(alloc_info.num_allocs(), 1);
        assert_eq!(alloc_info.num_frees(), 0);
    }

    #[test]
    fn it_detects_bad_layout() {
        // Freeing a u32 box as a f64 box changes both size and alignment.
        let alloc_info = record_allocs(|| unsafe {
            mem::transmute::<_, Box<f64>>(Box::new(42u32));
        });
        assert_eq!(alloc_info.result().unwrap_err(), AllocError::BadLayout);
        assert_eq!(alloc_info.num_allocs(), 1);
        assert_eq!(alloc_info.num_frees(), 1);
    }

    #[test]
    fn it_detects_no_data() {
        let alloc_info = record_allocs(|| ());
        assert_eq!(alloc_info.result().unwrap_err(), AllocError::NoData);
        assert_eq!(alloc_info.num_allocs(), 0);
        assert_eq!(alloc_info.num_frees(), 0);
    }

    #[test]
    fn it_detects_bad_alignment() {
        // [u8; 4] has the same size as u32 but alignment 1 instead of 4.
        let alloc_info = record_allocs(|| unsafe {
            mem::transmute::<_, Box<[u8; 4]>>(Box::new(42u32));
        });
        assert_eq!(alloc_info.result().unwrap_err(), AllocError::BadAlignment);
        assert_eq!(alloc_info.num_allocs(), 1);
        assert_eq!(alloc_info.num_frees(), 1);
    }

    #[test]
    fn it_detects_bad_size() {
        // [u32; 2] has the same alignment as u32 but twice the size.
        let alloc_info = record_allocs(|| unsafe {
            mem::transmute::<_, Box<[u32; 2]>>(Box::new(42u32));
        });
        assert_eq!(alloc_info.result().unwrap_err(), AllocError::BadSize);
        assert_eq!(alloc_info.num_allocs(), 1);
        assert_eq!(alloc_info.num_frees(), 1);
    }

    #[test]
    fn it_detects_double_free() {
        let alloc_info = record_allocs(|| unsafe {
            let mut x = mem::ManuallyDrop::new(Box::new(42));
            mem::ManuallyDrop::drop(&mut x);
            mem::ManuallyDrop::drop(&mut x);
        });
        assert_eq!(alloc_info.result().unwrap_err(), AllocError::DoubleFree);
        assert_eq!(alloc_info.num_allocs(), 1);
        assert_eq!(alloc_info.num_frees(), 2);
    }

    #[test]
    fn it_detects_bad_ptr() {
        // Bump the Box's internal pointer by one so the freed pointer no
        // longer matches the allocated one.
        let alloc_info = record_allocs(|| unsafe {
            let mut x = Box::new(42);
            *mem::transmute::<_, &mut usize>(&mut x) += 1;
        });
        assert_eq!(alloc_info.result().unwrap_err(), AllocError::BadPtr);
        assert_eq!(alloc_info.num_allocs(), 1);
        assert_eq!(alloc_info.num_frees(), 1);
    }

    #[test]
    fn it_works_amongst_many() {
        let alloc_info = record_allocs(|| {
            let _unused = do_some_allocations();
            let _p = Box::new(42);
            let _unused = do_some_allocations();
        });
        alloc_info.result().unwrap();
        // Exact values depend on Vec growth behavior in do_some_allocations.
        assert_eq!(alloc_info.peak_mem(), 580);
        assert_eq!(alloc_info.peak_mem_allocs(), 52);
    }

    #[test]
    fn it_detects_leak_amongst_many() {
        // As it_detects_leak, but buried among balanced allocations.
        let alloc_info = record_allocs(|| {
            let _unused = do_some_allocations();
            let p = Box::new(42);
            let _unused = do_some_allocations();
            mem::forget(p);
            let _unused = do_some_allocations();
        });
        assert_eq!(alloc_info.result().unwrap_err(), AllocError::Leak);
    }

    #[test]
    fn it_detects_bad_layout_amongst_many() {
        let alloc_info = record_allocs(|| unsafe {
            let _unused = do_some_allocations();
            let p = Box::new(42u32);
            let _unused = do_some_allocations();
            mem::transmute::<_, Box<f64>>(p);
            let _unused = do_some_allocations();
        });
        assert_eq!(alloc_info.result().unwrap_err(), AllocError::BadLayout);
    }

    #[test]
    fn it_detects_bad_alignment_amongst_many() {
        let alloc_info = record_allocs(|| unsafe {
            let _unused = do_some_allocations();
            let p = Box::new(42u32);
            let _unused = do_some_allocations();
            mem::transmute::<_, Box<[u8; 4]>>(p);
            let _unused = do_some_allocations();
        });
        assert_eq!(alloc_info.result().unwrap_err(), AllocError::BadAlignment);
    }

    #[test]
    fn it_detects_bad_size_amongst_many() {
        let alloc_info = record_allocs(|| unsafe {
            let _unused = do_some_allocations();
            let p = Box::new(42u32);
            let _unused = do_some_allocations();
            mem::transmute::<_, Box<[u32; 2]>>(p);
            let _unused = do_some_allocations();
        });
        assert_eq!(alloc_info.result().unwrap_err(), AllocError::BadSize);
    }

    #[test]
    fn it_detects_double_free_amongst_many() {
        let alloc_info = record_allocs(|| unsafe {
            let _unused = do_some_allocations();
            let mut x = mem::ManuallyDrop::new(Box::new(42));
            let _unused = do_some_allocations();
            mem::ManuallyDrop::drop(&mut x);
            let _unused = do_some_allocations();
            mem::ManuallyDrop::drop(&mut x);
            let _unused = do_some_allocations();
        });
        assert_eq!(alloc_info.result().unwrap_err(), AllocError::DoubleFree);
    }

    #[test]
    fn it_detects_bad_ptr_amongst_many() {
        let alloc_info = record_allocs(|| unsafe {
            let _unused = do_some_allocations();
            let mut x = Box::new(42);
            let _unused = do_some_allocations();
            *mem::transmute::<_, &mut usize>(&mut x) += 1;
            let _unused = do_some_allocations();
        });
        assert_eq!(alloc_info.result().unwrap_err(), AllocError::BadPtr);
    }
}