slitter 0.1.0

A C- and Rust-callable slab allocator with a focus on safety
//! A `Mill` hands out parcels of span data and associated span
//! metadata to `Press`es.  We expect multiple `Press`es to share the
//! same `Mill`; `Press`es belong to `ClassInfo` structs and are thus
//! immortal, and `Mill`s are immortal as well.
//!
//! Each `Mill` owns a large `Chunk` and an associated range of
//! metadata, and partitions that chunk into smaller `Span`s.  The
//! chunk data is a 1 GB range, aligned to 1 GB, and the associated
//! array of metadata lives at a fixed offset from the beginning of
//! the chunk.  The layout looks like
//!
//! | guard | meta | guard | data ... data | guard |
//!
//! where the guard and meta(data) regions are 2 MB each, and the
//! data is 1 GB, aligned to 1 GB.
//!
//! Each chunk is divided into 64 K spans of 16 KB each.  Each span is
//! associated with a metadata object in the parallel flat array that
//! lives in the metadata range.
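//!
//! As a sketch of the resulting address arithmetic (assuming the
//! production constants: 1 GB data, 2 MB guard and metadata pages,
//! 16 KB spans), the metadata entry for any allocated address can be
//! recovered from alignment alone, which is exactly what
//! `SpanMetadata::from_allocation_address` below relies on:
//!
//! ```ignore
//! let chunk = address - (address % DATA_ALIGNMENT); // 1 GB-aligned chunk base
//! let index = (address - chunk) / SPAN_ALIGNMENT;   // span id within the chunk
//! // The metadata array sits one guard page + one metadata page below the data.
//! let meta = (chunk - GUARD_PAGE_SIZE - METADATA_PAGE_SIZE) as *const SpanMetadata;
//! let entry = unsafe { meta.add(index) };
//! ```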
#[cfg(any(
    all(test, feature = "check_contracts_in_tests"),
    feature = "check_contracts"
))]
use contracts::*;
#[cfg(not(any(
    all(test, feature = "check_contracts_in_tests"),
    feature = "check_contracts"
)))]
use disabled_contracts::*;

use std::collections::HashMap;
use std::ffi::c_void;
use std::num::NonZeroU32;
use std::num::NonZeroUsize;
use std::ptr::NonNull;
use std::sync::atomic::AtomicUsize;
use std::sync::Mutex;

#[cfg(any(
    all(test, feature = "check_contracts_in_tests"),
    feature = "check_contracts"
))]
use crate::debug_arange_map;

use crate::mapper::Mapper;

/// Data chunks are naturally aligned to their size, 1GB.
#[cfg(not(feature = "test_only_small_constants"))]
const DATA_ALIGNMENT: usize = 1 << 30;
/// We use 2 MB sizes to enable huge pages.  3 guard superpages + 1
/// metadata superpage per chunk is still less than 1% overhead.
#[cfg(not(feature = "test_only_small_constants"))]
pub const GUARD_PAGE_SIZE: usize = 2 << 20;
#[cfg(not(feature = "test_only_small_constants"))]
const METADATA_PAGE_SIZE: usize = 2 << 20;

/// Spans are aligned to 16 KB, within the chunk.
#[cfg(not(feature = "test_only_small_constants"))]
pub const SPAN_ALIGNMENT: usize = 16 << 10;

// Try to shrink everything by a factor of ~512.  We have to bump
// up the metadata page size a little, since we also
// want a smaller span alignment (and the metadata
// array must include one entry per potential span).
// Keep `GUARD_PAGE_SIZE` equal to `METADATA_PAGE_SIZE`
// to better match production.
#[cfg(feature = "test_only_small_constants")]
const DATA_ALIGNMENT: usize = 2 << 20;
#[cfg(feature = "test_only_small_constants")]
pub const GUARD_PAGE_SIZE: usize = 16 << 10;
#[cfg(feature = "test_only_small_constants")]
const METADATA_PAGE_SIZE: usize = 16 << 10;

#[cfg(feature = "test_only_small_constants")]
pub const SPAN_ALIGNMENT: usize = 4 << 10;

/// Maximum size in bytes we can service for a single span.  The
/// higher this value, the more bytes we may lose to fragmentation
/// when the remaining bytes in a chunk aren't enough.
///
/// Setting this to 1/16th of the chunk size means we lose at most
/// ~6.25% to that source of fragmentation.
pub const MAX_SPAN_SIZE: usize = DATA_ALIGNMENT / 16;

/// By default, we want to carve out *nearly* 1 MB spans: the slight
/// misalignment spreads our metadata across different cache sets.
#[cfg(not(feature = "test_only_small_constants"))]
pub const DEFAULT_DESIRED_SPAN_SIZE: usize = (1 << 20) - SPAN_ALIGNMENT;

#[cfg(feature = "test_only_small_constants")]
pub const DEFAULT_DESIRED_SPAN_SIZE: usize = (8 << 10) - SPAN_ALIGNMENT;

static_assertions::const_assert!(DEFAULT_DESIRED_SPAN_SIZE <= MAX_SPAN_SIZE);

/// Grabbing a region of address space of at least this many bytes
/// should be enough to find a spot for the span + its metadata.
///
/// If the first aligned region has enough of a prefix for 2 guard
/// pages and the metadata, we're good.
///
/// Otherwise, the next aligned region starts at most `DATA_ALIGNMENT +
/// 2 * GUARD_PAGE_SIZE + METADATA_PAGE_SIZE` bytes in, which must
/// still leave at least `GUARD_PAGE_SIZE` bytes of suffix before the
/// end of the region.
///
/// The code in this module, including this constant, assumes that
/// `DATA_ALIGNMENT` is greater than the total number of bytes we wish
/// to keep around the data span.
const MAPPED_REGION_SIZE: usize = 2 * DATA_ALIGNMENT + 3 * GUARD_PAGE_SIZE + METADATA_PAGE_SIZE;

const PREFIX_SIZE: usize = GUARD_PAGE_SIZE + METADATA_PAGE_SIZE + GUARD_PAGE_SIZE;
const SUFFIX_SIZE: usize = GUARD_PAGE_SIZE;

// The constants are tightly coupled.  Make sure they make sense.
static_assertions::const_assert_eq!(
    MAPPED_REGION_SIZE,
    2 * DATA_ALIGNMENT + PREFIX_SIZE + SUFFIX_SIZE
);
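
// With production constants, that reservation works out to
// 2 * (1 << 30) + 3 * (2 << 20) + (2 << 20) bytes = 2 GiB + 8 MiB.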

// We must have enough room in the metadata page for our spans'
// metadata.  There are `DATA_ALIGNMENT / SPAN_ALIGNMENT` span-aligned
// regions per data region, and each one needs a `SpanMetadata`.  The
// corresponding array of `SpanMetadata` must fit in the
// `METADATA_PAGE_SIZE`.
static_assertions::const_assert!(
    DATA_ALIGNMENT / SPAN_ALIGNMENT <= METADATA_PAGE_SIZE / std::mem::size_of::<SpanMetadata>()
);

static_assertions::const_assert!(std::mem::size_of::<MetaArray>() <= METADATA_PAGE_SIZE);

#[derive(Debug)]
#[repr(C)]
pub struct SpanMetadata {
    /// The id of the class for all allocations in this span.  We
    /// don't use a `Class` because 0-initialised data must be valid.
    pub(crate) class_id: Option<NonZeroU32>,
    /// Number of elements available in the span
    pub(crate) bump_limit: u32,
    /// Number of elements allocated in the span
    pub(crate) bump_ptr: AtomicUsize,
    /// Start address for the span.
    pub(crate) span_begin: usize,
}

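// This deliberately-unused declaration makes the compiler run its FFI
// lints over `SpanMetadata` as a C-visible type; the function's name
// records the invariant that a zero-filled `SpanMetadata` must be a
// valid value (hence `Option<NonZeroU32>` rather than a bare class id).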
#[allow(unused)]
extern "C" {
    fn unused_span_metadata_is_zero_safe() -> SpanMetadata;
}

/// A `MilledRange` represents a newly allocated span of data and its
/// associated metadata struct.
#[derive(Debug)]
pub struct MilledRange {
    /// The meta object is initially zero-filled, and is the first
    /// SpanMetadata object for the allocated span.
    pub meta: &'static mut SpanMetadata,
    /// A span may be associated with multiple metadata structs (but
    /// each SpanMetadata only maps to one span); the trail slice
    /// contains the remaining metadata structs.  They must be
    /// initialised so that we can later get the metadata (class id)
    /// for an allocated object.
    pub trail: &'static mut [SpanMetadata],
    pub data: *mut c_void,
    pub data_size: usize,
}
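
// A hedged sketch (not taken from this crate) of how a caller might
// initialise the trail before handing out objects, so that
// `SpanMetadata::from_allocation_address` resolves any interior
// address; `class` stands in for a hypothetical `NonZeroU32` class id:
//
//     for meta in range.trail.iter_mut() {
//         meta.class_id = Some(class);
//         meta.span_begin = range.data as usize;
//     }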

/// The array of metadata associated with a chunk.
#[derive(Debug)]
struct MetaArray {
    chunk_meta: [SpanMetadata; DATA_ALIGNMENT / SPAN_ALIGNMENT],
}

#[derive(Debug)]
struct Chunk {
    meta: *mut MetaArray,
    spans: usize,      // address where the chunk's spans start.
    span_count: usize, // chunk size in spans

    // Bump pointer to allocate spans
    next_free_span: usize,
}

/// We manage the metadata array and the spans in each Chunk to avoid
/// accidental sharing.  That's why it's safe to `Send` them.
unsafe impl Send for Chunk {}

#[derive(Debug)]
pub struct Mill {
    mapper: &'static dyn Mapper,
    current_chunk: Mutex<Option<Chunk>>,
}

/// Returns a reference to the `Mill` for that `mapper_name`,
/// or the default `Mill` if the name is `None`.
///
/// # Errors
///
/// Returns `Err` if no such mapper is defined.
pub fn get_mill(mapper_name: Option<&str>) -> Result<&'static Mill, &'static str> {
    lazy_static::lazy_static! {
        // The keys are the addresses of `&'static dyn Mapper`.
        static ref MILLS: Mutex<HashMap<usize, &'static Mill>> = Default::default();
    }

    let mapper: &'static _ = crate::mapper::get_mapper(mapper_name)?;
    let address = mapper as *const _ as *const () as usize;
    let mut mills = MILLS.lock().unwrap();
    Ok(mills
        .entry(address)
        .or_insert_with(|| Box::leak(Box::new(Mill::new(mapper)))))
}

impl SpanMetadata {
    /// Maps a Press-allocated address to its metadata.
    pub fn from_allocation_address(address: usize) -> *mut SpanMetadata {
        let base = address - (address % DATA_ALIGNMENT);
        let index = (address - base) / SPAN_ALIGNMENT; // Span id.
        let meta = base - GUARD_PAGE_SIZE - METADATA_PAGE_SIZE;

        unsafe { (meta as *mut SpanMetadata).add(index) }
    }
}
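
// A minimal sketch exercising the pointer arithmetic above with an
// arbitrary (unmapped) aligned base address; no memory is touched.
#[test]
fn test_from_allocation_address_arithmetic() {
    let base = 4 * DATA_ALIGNMENT; // hypothetical chunk base, DATA_ALIGNMENT-aligned
    let meta = (base - GUARD_PAGE_SIZE - METADATA_PAGE_SIZE) as *mut SpanMetadata;

    // The first byte of the chunk maps to metadata index 0.
    assert_eq!(SpanMetadata::from_allocation_address(base), meta);
    // Any address within the second span maps to metadata index 1.
    assert_eq!(
        SpanMetadata::from_allocation_address(base + SPAN_ALIGNMENT + 123),
        unsafe { meta.add(1) }
    );
}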

/// We track the exact way we want to partition a range of address
/// space in an `AllocatedChunk`.
///
/// The space looks like:
///
/// - slop:  [base, bottom_slop_end)
/// - guard: [bottom_slop_end, meta)
/// - meta:  [meta, meta + METADATA_PAGE_SIZE)
/// - guard: [meta + METADATA_PAGE_SIZE, data)
/// - data:  [data, top_slop_begin - GUARD_PAGE_SIZE)
/// - guard: [top_slop_begin - GUARD_PAGE_SIZE, top_slop_begin)
/// - slop:  [top_slop_begin, top)
///
/// Once the `meta` and `data` regions are successfully allocated, we
/// want to get rid of the bottom and top "slop" regions, after which
/// everything that remains is immortal.
///
/// On failure, we must instead release everything from `base` to `top`.
#[derive(Debug)]
struct AllocatedChunk<'a> {
    mapper: &'a dyn Mapper,
    // All these `usize` are addresses.
    base: NonZeroUsize,     // page-aligned
    top: NonZeroUsize,      // page-aligned
    bottom_slop_end: usize, // page-aligned
    pub meta: *mut MetaArray,
    pub data: *mut c_void,
    pub data_end: usize,
    top_slop_begin: usize, // page-aligned
}

impl<'a> AllocatedChunk<'a> {
    /// Attempts to carve out a span + metadata from a new range of
    /// address space returned by `mapper`.
    ///
    /// # Errors
    ///
    /// Returns `Err` when the mapper itself fails.  Failures downstream
    /// indicate that the mapper returned an invalid range, and result
    /// in a panic.
    pub fn new(mapper: &'a dyn Mapper) -> Result<AllocatedChunk<'a>, i32> {
        let page_size = mapper.page_size();
        let mut size = MAPPED_REGION_SIZE;
        if (size % page_size) > 0 {
            size = page_size * (1 + (size / page_size));
        }

        let (region, actual): (NonNull<c_void>, usize) =
            mapper.reserve(size, DATA_ALIGNMENT, PREFIX_SIZE, SUFFIX_SIZE)?;
        Ok(AllocatedChunk::new_from_range(
            mapper,
            NonZeroUsize::new(region.as_ptr() as usize).expect("NonNull should be NonZero"),
            actual,
        )
        .expect("mapper returned a bad region"))
    }

    /// Attempts to carve out a span + metadata in `[base, base + size)`.
    ///
    /// TODO: Test the core of this function symbolically.
    fn new_from_range(
        mapper: &'a dyn Mapper,
        base: NonZeroUsize,
        size: usize,
    ) -> Result<AllocatedChunk<'a>, &'static str> {
        let page_size = mapper.page_size();

        // Try to find the data address.  It must be aligned to
        // DATA_ALIGNMENT, and have room for the metadata + guard pages
        // before/after the data span.
        if (base.get() % page_size) != 0 {
            return Err("base is incorrectly aligned");
        }

        if (size % page_size) != 0 {
            return Err("size is incorrectly aligned");
        }

        // Since we track the exclusive end of the region, this will
        // fail if someone manages to map a region that contains the
        // last page of the address space.  That doesn't happen for
        // userspace on platforms we care about.
        let top = NonZeroUsize::new(
            base.get()
                .checked_add(size)
                .ok_or("input region wraps around")?,
        )
        .expect("must be non-zero");
        let mut data = DATA_ALIGNMENT
            .checked_mul((base.get() / DATA_ALIGNMENT) + 1)
            .ok_or("overflow in alignment")?;

        assert!(data >= base.get());
        // If there's not enough room for the prefix, go forward by
        // another span size.
        if (data - base.get()) < PREFIX_SIZE {
            data = data.checked_add(DATA_ALIGNMENT).ok_or("overflow in bump")?;
        }

        // This subtraction is safe because data >= base + PREFIX_SIZE.
        let mut bottom_slop_end = data.checked_sub(PREFIX_SIZE).unwrap();
        // Make sure bottom_slop_end is aligned to a page.  It's always OK to remove *less*.
        bottom_slop_end -= bottom_slop_end % page_size;

        // This addition is safe for the same reason.
        let meta = bottom_slop_end.checked_add(GUARD_PAGE_SIZE).unwrap() as *mut MetaArray;

        let data_end = data
            .checked_add(DATA_ALIGNMENT)
            .ok_or("overflow in data_end")?;
        let mut suffix_end = data_end
            .checked_add(SUFFIX_SIZE)
            .ok_or("overflow in suffix_end")?;
        // Align suffix_end to a page.  It's always OK to remove less,
        // so we bump that up.
        if (suffix_end % page_size) > 0 {
            suffix_end = page_size * (1 + (suffix_end / page_size));
        }

        if suffix_end > top.get() {
            return Err("region too small");
        }

        Ok(AllocatedChunk {
            mapper,
            base,
            top,
            bottom_slop_end,
            meta,
            data: data as *mut c_void,
            data_end,
            top_slop_begin: suffix_end,
        })
    }

    /// Asserts against internal invariants.
    pub fn check_rep(&self) {
        let page_size = self.mapper.page_size();

        // Check that [base, top) makes sense.
        assert_eq!(self.base.get() % page_size, 0, "self: {:?}", self);
        assert_eq!(self.top.get() % page_size, 0, "self: {:?}", self);
        assert!(self.base.get() <= self.top.get(), "self: {:?}", self);

        // Now the slop regions.
        assert_eq!(self.bottom_slop_end % page_size, 0, "self: {:?}", self);
        assert_eq!(self.top_slop_begin % page_size, 0, "self: {:?}", self);

        assert!(self.bottom_slop_end >= self.base.get(), "self: {:?}", self);
        assert!(self.top_slop_begin <= self.top.get(), "self: {:?}", self);
        assert!(
            self.bottom_slop_end <= self.top_slop_begin,
            "self: {:?}",
            self
        );

        // The meta region must be between the slops, and must be
        // correctly offset from the data.
        assert!(
            self.meta as usize >= self.bottom_slop_end + GUARD_PAGE_SIZE,
            "self: {:?}",
            self
        );
        assert!(
            self.meta as usize + METADATA_PAGE_SIZE <= self.top_slop_begin,
            "self: {:?}",
            self
        );

        assert_eq!(
            self.meta as usize + METADATA_PAGE_SIZE + GUARD_PAGE_SIZE,
            self.data as usize,
            "self: {:?}",
            self
        );

        // Now do the data.
        assert_eq!(self.data as usize % DATA_ALIGNMENT, 0, "self: {:?}", self);
        assert!(
            self.data as usize >= self.bottom_slop_end,
            "self: {:?}",
            self
        );
        assert!(
            self.data as usize + DATA_ALIGNMENT + GUARD_PAGE_SIZE <= self.top_slop_begin,
            "self: {:?}",
            self
        );

        assert_eq!(
            SpanMetadata::from_allocation_address(self.data as usize),
            self.meta as *mut SpanMetadata
        );
        assert_eq!(
            SpanMetadata::from_allocation_address(self.data as usize + (DATA_ALIGNMENT - 1)),
            unsafe { (self.meta as *mut SpanMetadata).add(DATA_ALIGNMENT / SPAN_ALIGNMENT - 1) }
        );
    }

    /// Attempts to allocate the chunk, and calls `f` if that succeeds.
    ///
    /// If `f` fails, releases the chunk; if `f` succeeds, commits it.
    #[ensures(ret.is_ok() == debug_arange_map::is_metadata(old(self.meta as usize), METADATA_PAGE_SIZE).is_ok(),
               "The metadata region is marked as such on success.")]
    #[ensures(ret.is_ok() == debug_arange_map::is_data(old(self.data as usize), DATA_ALIGNMENT).is_ok(),
               "The data region is marked as such on success.")]
    pub fn call_with_chunk<T>(self, f: impl FnOnce(&Self) -> Result<T, i32>) -> Result<T, i32> {
        self.check_rep();
        self.allocate()?;

        let ret = f(&self);
        if ret.is_err() {
            self.release_all()?;
        } else {
            // TODO: figure out failure logging.
            let _ = self.commit();
        }

        ret
    }

    /// Releases the region covered by the AllocatedChunk.
    fn release_all(self) -> Result<(), i32> {
        self.mapper.release(
            NonNull::new(self.base.get() as *mut c_void).expect("must be valid"),
            self.top.get() - self.base.get(),
        )
    }

    /// Backs the data and metadata region with memory.
    #[ensures(debug_arange_map::is_metadata(self.meta as usize, METADATA_PAGE_SIZE).is_ok(),
               "The metadata region is marked as such.")]
    #[ensures(debug_arange_map::is_data(self.data as usize, DATA_ALIGNMENT).is_ok(),
               "The data region is marked as such.")]
    fn allocate(&self) -> Result<(), i32> {
        // Ensures the region containing [begin, begin + size) is
        // backed by memory, by rounding outward to a page.
        fn rounded_allocate(
            page_size: usize,
            mut begin: usize,
            size: usize,
            allocator: impl FnOnce(NonNull<c_void>, usize) -> Result<(), i32>,
        ) -> Result<(), i32> {
            let mut top = begin + size;

            begin -= begin % page_size;
            if (top % page_size) > 0 {
                top = page_size * (1 + (top / page_size));
            }

            allocator(
                NonNull::new(begin as *mut c_void).expect("must be valid"),
                top - begin,
            )
        }

        let page_size = self.mapper.page_size();
        rounded_allocate(
            page_size,
            self.meta as usize,
            METADATA_PAGE_SIZE,
            |begin, size| self.mapper.allocate_meta(begin, size),
        )?;
        rounded_allocate(
            page_size,
            self.data as usize,
            DATA_ALIGNMENT,
            |begin, size| self.mapper.allocate_data(begin, size),
        )
    }

    /// Releases any slop around the allocated memory.
    #[invariant(debug_arange_map::is_metadata(self.meta as usize, METADATA_PAGE_SIZE).is_ok(),
               "The metadata region is marked as such.")]
    #[invariant(debug_arange_map::is_data(self.data as usize, DATA_ALIGNMENT).is_ok(),
               "The data region is marked as such.")]
    fn commit(self) -> Result<(), i32> {
        fn release(mapper: &dyn Mapper, begin: usize, end: usize) -> Result<(), i32> {
            let page_size = mapper.page_size();

            assert!(begin <= end);
            assert_eq!(begin % page_size, 0);
            assert_eq!(end % page_size, 0);

            if begin == end {
                return Ok(());
            }

            mapper.release(
                NonNull::new(begin as *mut c_void).expect("must be valid"),
                end - begin,
            )
        }

        release(self.mapper, self.base.get(), self.bottom_slop_end)?;
        release(self.mapper, self.top_slop_begin, self.top.get())
    }
}

impl Mill {
    pub fn new(mapper: &'static dyn Mapper) -> Self {
        extern "C" {
            fn slitter__data_alignment() -> usize;
            fn slitter__guard_page_size() -> usize;
            fn slitter__metadata_page_size() -> usize;
            fn slitter__span_alignment() -> usize;
            fn slitter__span_metadata_size() -> usize;
        }

        unsafe {
            assert_eq!(DATA_ALIGNMENT, slitter__data_alignment());
            assert_eq!(GUARD_PAGE_SIZE, slitter__guard_page_size());
            assert_eq!(METADATA_PAGE_SIZE, slitter__metadata_page_size());
            assert_eq!(SPAN_ALIGNMENT, slitter__span_alignment());
            assert_eq!(
                std::mem::size_of::<SpanMetadata>(),
                slitter__span_metadata_size()
            );
        }

        Self {
            mapper,
            current_chunk: Default::default(),
        }
    }

    /// Returns a fresh chunk from `mapper`.
    #[ensures(ret.is_ok() ->
              debug_arange_map::is_metadata(ret.as_ref().unwrap().meta as usize,
                                            METADATA_PAGE_SIZE).is_ok(),
              "The metadata region is marked as such.")]
    #[ensures(ret.is_ok() ->
              debug_arange_map::is_data(ret.as_ref().unwrap().spans,
                                        ret.as_ref().unwrap().span_count * SPAN_ALIGNMENT).is_ok(),
              "The data region is marked as such.")]
    fn allocate_chunk(mapper: &dyn Mapper) -> Result<Chunk, i32> {
        AllocatedChunk::new(mapper)?.call_with_chunk(|chunk| {
            let meta = unsafe { chunk.meta.as_mut() }.expect("must be valid");
            Ok(Chunk {
                meta,
                spans: chunk.data as usize,
                span_count: DATA_ALIGNMENT / SPAN_ALIGNMENT,
                next_free_span: 0,
            })
        })
    }

    /// Attempts to chop a new set of spans from a chunk.
    ///
    /// On success, the returned range contains at least `min` spans, and up to `desired` spans.
    #[requires(debug_arange_map::is_metadata(chunk.meta as usize,
                                             METADATA_PAGE_SIZE).is_ok(),
               "The metadata region must be marked as such")]
    #[requires(debug_arange_map::is_data(chunk.spans as usize,
                                         chunk.span_count * SPAN_ALIGNMENT).is_ok(),
               "The data region must be marked as such")]
    #[requires(min <= DATA_ALIGNMENT / SPAN_ALIGNMENT,
        "The request must fit in a data chunk.")]
    #[ensures(ret.is_none() -> chunk.next_free_span == old(chunk.next_free_span),
              "Must leave the chunk untouched on failure.")]
    #[ensures(ret.is_some() ->
              ret.as_ref().unwrap().data_size >= min * SPAN_ALIGNMENT,
              "Must allocate enough for the min size on success.")]
    #[ensures(ret.is_some() ->
              chunk.next_free_span == old(chunk.next_free_span) + ret.as_ref().unwrap().data_size / SPAN_ALIGNMENT,
              "Must consume the bump pointer on success.")]
    fn allocate_span(chunk: &mut Chunk, min: usize, desired: usize) -> Option<MilledRange> {
        // Inspect this routine to confirm that the meta and data
        // regions are allocated according to `next_free_span`.  The
        // postconditions are a mess of casts.
        if chunk.next_free_span >= chunk.span_count {
            return None;
        }

        let remaining = chunk.span_count - chunk.next_free_span;
        // TODO: it would be nice to save these unused spans somewhere.
        if remaining < min {
            return None;
        }

        let allocated = remaining.min(desired);
        let index = chunk.next_free_span;
        chunk.next_free_span += allocated;

        let meta: &'static mut _ = unsafe { chunk.meta.as_mut() }.unwrap();
        // Bamboozle the borrow checker... we will pass two mutable
        // references to the chunk_meta array (and many more that the
        // checker isn't aware of), but they're all disjoint.
        let meta2: &'static mut _ = unsafe { chunk.meta.as_mut() }.unwrap();

        Some(MilledRange {
            meta: &mut meta.chunk_meta[index],
            trail: &mut meta2.chunk_meta[index + 1..index + allocated],
            data: (chunk.spans + index * SPAN_ALIGNMENT) as *mut c_void,
            data_size: allocated * SPAN_ALIGNMENT,
        })
    }

    /// Attempts to return a fresh range of allocation space.  On
    /// success, the newly milled range will contain at least
    /// `min_size` bytes, but the implementation tries to get
    /// `desired_size`, if possible.
    ///
    /// The `min_size` must be at most `MAX_SPAN_SIZE`.
    ///
    /// # Errors
    ///
    /// Returns `Err` on mapping failures (OOM-like conditions).
    #[requires(min_size <= MAX_SPAN_SIZE)]
    #[requires(min_size <= desired_size.unwrap_or(min_size))]
    pub fn get_span(
        &self,
        min_size: usize,
        desired_size: Option<usize>,
    ) -> Result<MilledRange, i32> {
        assert!(min_size <= MAX_SPAN_SIZE);

        // We must want at least min_size, and not more than the maximum span size.
        let desired = desired_size
            .unwrap_or(DEFAULT_DESIRED_SPAN_SIZE)
            .clamp(min_size, MAX_SPAN_SIZE);
        let min_span_count =
            (min_size / SPAN_ALIGNMENT) + ((min_size % SPAN_ALIGNMENT) > 0) as usize;
        let desired_span_count =
            (desired / SPAN_ALIGNMENT) + ((desired % SPAN_ALIGNMENT) > 0) as usize;
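        // e.g., with 16 KiB spans, a 100 KiB minimum rounds up to
        // ceil(100 / 16) = 7 spans; the same rounding applies to `desired`.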

        let mut chunk_or = self.current_chunk.lock().unwrap();

        if chunk_or.is_none() {
            *chunk_or = Some(Mill::allocate_chunk(self.mapper)?);
        }

        if let Some(range) = Mill::allocate_span(
            chunk_or.as_mut().unwrap(),
            min_span_count,
            desired_span_count,
        ) {
            return Ok(range);
        }

        *chunk_or = Some(Mill::allocate_chunk(self.mapper)?);
        Ok(Mill::allocate_span(
            chunk_or.as_mut().unwrap(),
            min_span_count,
            desired_span_count,
        )
        .expect("New chunk must have a span"))
    }
}
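
// A hedged usage sketch: grab the default mill and carve out at least
// one span.  This maps real memory via the default mapper, so it is a
// smoke test of the whole path rather than a unit test of the
// arithmetic.
#[test]
fn test_get_span_smoke() {
    let mill = get_mill(None).expect("default mill must exist");
    let range = mill
        .get_span(SPAN_ALIGNMENT, None)
        .expect("mapping a fresh chunk should succeed");

    assert!(range.data_size >= SPAN_ALIGNMENT);
    assert_eq!(range.data as usize % SPAN_ALIGNMENT, 0);
}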

#[test]
fn test_allocated_span_valid() {
    // Check that we can always construct an AllocatedChunk when we
    // pass in large enough regions.
    let mapper = crate::mapper::get_mapper(None).expect("Default mapper exists");

    // The test cases below assume that the GUARD_PAGE_SIZE and
    // METADATA_PAGE_SIZE are multiples of the page size.
    assert_eq!(GUARD_PAGE_SIZE % mapper.page_size(), 0);
    assert_eq!(METADATA_PAGE_SIZE % mapper.page_size(), 0);

    // We assume the zero page can't be mapped.  See what happens when
    // we get the next page.
    let at_start = AllocatedChunk::new_from_range(
        mapper,
        NonZeroUsize::new(mapper.page_size()).unwrap(),
        MAPPED_REGION_SIZE,
    )
    .expect("must construct");
    at_start.check_rep();

    // This is the highest possible address we might get, minus one
    // page: if the mapped region includes the last page, we fail to
    // represent the end of the region.  I don't think it's worth
    // adding complexity to handle a situation that never happens.
    let at_end = AllocatedChunk::new_from_range(
        mapper,
        NonZeroUsize::new(usize::MAX - MAPPED_REGION_SIZE - mapper.page_size() + 1).unwrap(),
        MAPPED_REGION_SIZE,
    )
    .expect("must construct");
    at_end.check_rep();

    let aligned = AllocatedChunk::new_from_range(
        mapper,
        NonZeroUsize::new(DATA_ALIGNMENT).unwrap(),
        MAPPED_REGION_SIZE,
    )
    .expect("must construct");
    aligned.check_rep();

    let unaligned = AllocatedChunk::new_from_range(
        mapper,
        NonZeroUsize::new(DATA_ALIGNMENT + mapper.page_size()).unwrap(),
        MAPPED_REGION_SIZE,
    )
    .expect("must construct");
    unaligned.check_rep();

    let offset_guard = AllocatedChunk::new_from_range(
        mapper,
        NonZeroUsize::new(DATA_ALIGNMENT - GUARD_PAGE_SIZE).unwrap(),
        MAPPED_REGION_SIZE,
    )
    .expect("must construct");
    offset_guard.check_rep();

    let offset_meta = AllocatedChunk::new_from_range(
        mapper,
        NonZeroUsize::new(DATA_ALIGNMENT - GUARD_PAGE_SIZE - METADATA_PAGE_SIZE).unwrap(),
        MAPPED_REGION_SIZE,
    )
    .expect("must construct");
    offset_meta.check_rep();

    let off_by_one = AllocatedChunk::new_from_range(
        mapper,
        NonZeroUsize::new(
            DATA_ALIGNMENT - 2 * GUARD_PAGE_SIZE - METADATA_PAGE_SIZE + mapper.page_size(),
        )
        .unwrap(),
        MAPPED_REGION_SIZE,
    )
    .expect("must construct");
    off_by_one.check_rep();

    let exact_fit = AllocatedChunk::new_from_range(
        mapper,
        NonZeroUsize::new(DATA_ALIGNMENT - 2 * GUARD_PAGE_SIZE - METADATA_PAGE_SIZE).unwrap(),
        MAPPED_REGION_SIZE,
    )
    .expect("must construct");
    exact_fit.check_rep();
}