data_model/volatile_memory.rs
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Types for volatile access to memory.
//!
//! Two of the core rules for safe Rust are no data races and no aliased mutable references.
//! `VolatileRef` and `VolatileSlice`, along with types that produce those which implement
//! `VolatileMemory`, allow us to sidestep those rules by wrapping pointers that absolutely have to
//! be accessed volatile. Some systems really do need to operate on shared memory and can't have
//! the compiler reordering or eliding access because it has no visibility into what other systems
//! are doing with that hunk of memory.
//!
//! For the purposes of maintaining safety, volatile memory has some rules of its own:
//! 1. No references or slices to volatile memory (`&` or `&mut`).
//! 2. Access should always be done with a volatile read or write.
//!
//! The first rule is because having references of any kind to memory considered volatile would
//! violate pointer aliasing. The second is because non-volatile accesses are inherently undefined
//! if done concurrently without synchronization. With volatile access we know that the compiler
//! has not reordered or elided the access.
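//!
//! # Examples
//!
//! A minimal sketch of typed volatile access, using the `VolatileMemory` implementation this
//! module provides for `&mut [u8]`:
//!
//! ```
//! # use data_model::VolatileMemory;
//! let mut mem = [0u8; 4];
//! let mem_ref = &mut mem[..];
//! let v_ref = mem_ref.get_ref::<u32>(0).unwrap();
//! v_ref.store(0xdeadbeefu32);
//! assert_eq!(v_ref.load(), 0xdeadbeef);
//! ```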

use std::cmp::min;
use std::fmt;
use std::io::Result as IoResult;
use std::io::{Read, Write};
use std::marker::PhantomData;
use std::mem::size_of;
use std::ptr::copy;
use std::ptr::{null_mut, read_volatile, write_volatile};
use std::result;
use std::slice::{from_raw_parts, from_raw_parts_mut};
use std::{isize, usize};

use DataInit;

#[derive(Eq, PartialEq, Debug)]
pub enum VolatileMemoryError {
    /// `addr` is out of bounds of the volatile memory slice.
    OutOfBounds { addr: u64 },
    /// Taking a slice at `base` with `offset` would overflow `u64`.
    Overflow { base: u64, offset: u64 },
}

impl fmt::Display for VolatileMemoryError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            VolatileMemoryError::OutOfBounds { addr } => {
                write!(f, "address 0x{:x} is out of bounds", addr)
            }
            VolatileMemoryError::Overflow { base, offset } => write!(
                f,
                "address 0x{:x} offset by 0x{:x} would overflow",
                base, offset
            ),
        }
    }
}

pub type VolatileMemoryResult<T> = result::Result<T, VolatileMemoryError>;

use VolatileMemoryError as Error;
type Result<T> = VolatileMemoryResult<T>;

/// Convenience function for computing `base + offset` which returns
/// `Err(VolatileMemoryError::Overflow)` instead of panicking in the case `base + offset` exceeds
/// `u64::MAX`.
///
/// # Examples
///
/// ```
/// # use data_model::*;
/// # fn get_slice(offset: u64, count: u64) -> VolatileMemoryResult<()> {
/// let mem_end = calc_offset(offset, count)?;
/// if mem_end > 100 {
///     return Err(VolatileMemoryError::OutOfBounds { addr: mem_end });
/// }
/// # Ok(())
/// # }
/// ```
pub fn calc_offset(base: u64, offset: u64) -> Result<u64> {
    match base.checked_add(offset) {
        None => Err(Error::Overflow { base, offset }),
        Some(m) => Ok(m),
    }
}

/// Trait for types that support raw volatile access to their data.
pub trait VolatileMemory {
    /// Gets a slice of memory at `offset` that is `count` bytes in length and supports volatile
    /// access.
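    ///
    /// # Examples
    ///
    /// A minimal sketch using the implementation this module provides for `&mut [u8]`:
    ///
    /// ```
    /// # use data_model::VolatileMemory;
    /// let mut mem = [0u8; 16];
    /// let mem_ref = &mut mem[..];
    /// let vslice = mem_ref.get_slice(4, 8).unwrap();
    /// assert_eq!(vslice.size(), 8);
    /// ```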
    fn get_slice(&self, offset: u64, count: u64) -> Result<VolatileSlice>;

    /// Gets a `VolatileRef` at `offset`.
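    ///
    /// # Examples
    ///
    /// A short sketch of typed access (assumes `u16: DataInit`, which `data_model` implements for
    /// the primitive integer types):
    ///
    /// ```
    /// # use data_model::VolatileMemory;
    /// let mut mem = [0u8; 2];
    /// let mem_ref = &mut mem[..];
    /// let v_ref = mem_ref.get_ref::<u16>(0).unwrap();
    /// v_ref.store(500u16);
    /// assert_eq!(v_ref.load(), 500);
    /// ```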
    fn get_ref<T: DataInit>(&self, offset: u64) -> Result<VolatileRef<T>> {
        let slice = self.get_slice(offset, size_of::<T>() as u64)?;
        Ok(VolatileRef {
            addr: slice.addr as *mut T,
            phantom: PhantomData,
        })
    }
}

impl<'a> VolatileMemory for &'a mut [u8] {
    fn get_slice(&self, offset: u64, count: u64) -> Result<VolatileSlice> {
        let mem_end = calc_offset(offset, count)?;
        if mem_end > self.len() as u64 {
            return Err(Error::OutOfBounds { addr: mem_end });
        }
        Ok(unsafe { VolatileSlice::new((self.as_ptr() as u64 + offset) as *mut _, count) })
    }
}

/// A slice of raw memory that supports volatile access.
#[derive(Copy, Clone, Debug)]
pub struct VolatileSlice<'a> {
    addr: *mut u8,
    size: u64,
    phantom: PhantomData<&'a u8>,
}

impl<'a> Default for VolatileSlice<'a> {
    fn default() -> VolatileSlice<'a> {
        VolatileSlice {
            addr: null_mut(),
            size: 0,
            phantom: PhantomData,
        }
    }
}

impl<'a> VolatileSlice<'a> {
    /// Creates a slice of raw memory that must support volatile access.
    ///
    /// To use this safely, the caller must guarantee that the memory at `addr` is `size` bytes
    /// long and is available for the duration of the lifetime of the new `VolatileSlice`. The
    /// caller must also guarantee that all other users of the given chunk of memory are using
    /// volatile accesses.
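    ///
    /// # Examples
    ///
    /// A sketch of a sound use: the buffer outlives the slice and nothing else aliases it.
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// let mut bytes = [0u8; 32];
    /// // Safe because `bytes` lives until the end of the function and is not otherwise accessed
    /// // while the `VolatileSlice` is in use.
    /// let vslice = unsafe { VolatileSlice::new(bytes.as_mut_ptr(), bytes.len() as u64) };
    /// assert_eq!(vslice.size(), 32);
    /// ```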
    pub unsafe fn new(addr: *mut u8, size: u64) -> VolatileSlice<'a> {
        VolatileSlice {
            addr,
            size,
            phantom: PhantomData,
        }
    }

    /// Gets the address of this slice's memory.
    pub fn as_ptr(&self) -> *mut u8 {
        self.addr
    }

    /// Gets the size of this slice.
    pub fn size(&self) -> u64 {
        self.size
    }

    /// Creates a copy of this slice with the address increased by `count` bytes, and the size
    /// reduced by `count` bytes.
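    ///
    /// # Examples
    ///
    /// A minimal sketch of advancing into a slice:
    ///
    /// ```
    /// # use data_model::VolatileMemory;
    /// let mut mem = [0u8; 32];
    /// let mem_ref = &mut mem[..];
    /// let vslice = mem_ref.get_slice(0, 32).unwrap();
    /// let advanced = vslice.offset(10).unwrap();
    /// assert_eq!(advanced.size(), 22);
    /// ```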
    pub fn offset(self, count: u64) -> Result<VolatileSlice<'a>> {
        let new_addr =
            (self.addr as u64)
                .checked_add(count)
                .ok_or(VolatileMemoryError::Overflow {
                    base: self.addr as u64,
                    offset: count,
                })?;
        if new_addr > usize::MAX as u64 {
            return Err(VolatileMemoryError::Overflow {
                base: self.addr as u64,
                offset: count,
            });
        }
        let new_size = self
            .size
            .checked_sub(count)
            .ok_or(VolatileMemoryError::OutOfBounds { addr: new_addr })?;
        // Safe because the memory has the same lifetime and points to a subset of the memory of
        // the original slice.
        unsafe { Ok(VolatileSlice::new(new_addr as *mut u8, new_size)) }
    }

    /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
    /// `buf`.
    ///
    /// The copy happens from smallest to largest address in `T` sized chunks using volatile reads.
    ///
    /// # Examples
    ///
    /// ```
    /// # use data_model::VolatileMemory;
    /// # fn test_copy_to() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let mem_ref = &mut mem[..];
    /// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
    /// let mut buf = [5u8; 16];
    /// vslice.copy_to(&mut buf[..]);
    /// for v in &buf[..] {
    ///     assert_eq!(*v, 0);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_to<T>(&self, buf: &mut [T])
    where
        T: DataInit,
    {
        let mut addr = self.addr;
        for v in buf.iter_mut().take(self.size as usize / size_of::<T>()) {
            unsafe {
                *v = read_volatile(addr as *const T);
                addr = addr.add(size_of::<T>());
            }
        }
    }

    /// Copies `self.size()` or `slice.size()` bytes, whichever is smaller, to `slice`.
    ///
    /// The copies happen in an undefined order.
    ///
    /// # Examples
    ///
    /// ```
    /// # use data_model::VolatileMemory;
    /// # fn test_copy_to_volatile_slice() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let mem_ref = &mut mem[..];
    /// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
    /// vslice.copy_to_volatile_slice(vslice.get_slice(16, 16).map_err(|_| ())?);
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) {
        unsafe {
            copy(self.addr, slice.addr, min(self.size, slice.size) as usize);
        }
    }

    /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
    /// this slice's memory.
    ///
    /// The copy happens from smallest to largest address in `T` sized chunks using volatile
    /// writes.
    ///
    /// # Examples
    ///
    /// ```
    /// # use data_model::VolatileMemory;
    /// # fn test_copy_from() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let mem_ref = &mut mem[..];
    /// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
    /// let buf = [5u8; 64];
    /// vslice.copy_from(&buf[..]);
    /// for i in 0..4 {
    ///     assert_eq!(vslice.get_ref::<u32>(i * 4).map_err(|_| ())?.load(), 0x05050505);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_from<T>(&self, buf: &[T])
    where
        T: DataInit,
    {
        let mut addr = self.addr;
        for &v in buf.iter().take(self.size as usize / size_of::<T>()) {
            unsafe {
                write_volatile(addr as *mut T, v);
                addr = addr.add(size_of::<T>());
            }
        }
    }

    /// Attempts to write all the data from this slice's memory to a writable object, returning
    /// how many bytes were actually written on success.
    ///
    /// # Arguments
    /// * `w` - The `Write` object to write to.
    ///
    /// # Examples
    ///
    /// * Write some bytes to /dev/null
    ///
    /// ```
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # use data_model::VolatileMemory;
    /// # fn test_write_null() -> Result<(), ()> {
    /// # let mut mem = [0u8; 32];
    /// # let mem_ref = &mut mem[..];
    /// # let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
    /// let mut file = File::create(Path::new("/dev/null")).map_err(|_| ())?;
    /// vslice.write_to(&mut file).map_err(|_| ())?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn write_to<T: Write>(&self, w: &mut T) -> IoResult<usize> {
        w.write(unsafe { self.as_slice() })
    }

    /// Writes all the data from this slice's memory to a writable object via `Write::write_all`.
    ///
    /// # Arguments
    /// * `w` - The `Write` object to write to.
    ///
    /// # Examples
    ///
    /// * Write some bytes to /dev/null
    ///
    /// ```
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # use data_model::VolatileMemory;
    /// # fn test_write_null() -> Result<(), ()> {
    /// # let mut mem = [0u8; 32];
    /// # let mem_ref = &mut mem[..];
    /// # let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
    /// let mut file = File::create(Path::new("/dev/null")).map_err(|_| ())?;
    /// vslice.write_all_to(&mut file).map_err(|_| ())?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn write_all_to<T: Write>(&self, w: &mut T) -> IoResult<()> {
        w.write_all(unsafe { self.as_slice() })
    }

    /// Reads up to this slice's size into memory from a readable object, returning how many bytes
    /// were actually read on success.
    ///
    /// # Arguments
    /// * `r` - The `Read` object to read from.
    ///
    /// # Examples
    ///
    /// * Read some bytes from /dev/zero
    ///
    /// ```
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # use data_model::VolatileMemory;
    /// # fn test_read_zero() -> Result<(), ()> {
    /// # let mut mem = [0u8; 32];
    /// # let mem_ref = &mut mem[..];
    /// # let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
    /// let mut file = File::open(Path::new("/dev/zero")).map_err(|_| ())?;
    /// vslice.read_from(&mut file).map_err(|_| ())?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn read_from<T: Read>(&self, r: &mut T) -> IoResult<usize> {
        r.read(unsafe { self.as_mut_slice() })
    }

    /// Reads exactly this slice's size into memory from a readable object via `Read::read_exact`.
    ///
    /// # Arguments
    /// * `r` - The `Read` object to read from.
    ///
    /// # Examples
    ///
    /// * Read exactly this slice's size from /dev/zero
    ///
    /// ```
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # use data_model::VolatileMemory;
    /// # fn test_read_zero() -> Result<(), ()> {
    /// # let mut mem = [0u8; 32];
    /// # let mem_ref = &mut mem[..];
    /// # let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
    /// let mut file = File::open(Path::new("/dev/zero")).map_err(|_| ())?;
    /// vslice.read_exact_from(&mut file).map_err(|_| ())?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn read_exact_from<T: Read>(&self, r: &mut T) -> IoResult<()> {
        r.read_exact(unsafe { self.as_mut_slice() })
    }

    // These functions are private and only used for the read/write functions. It is not valid in
    // general to take slices of volatile memory.
    unsafe fn as_slice(&self) -> &[u8] {
        from_raw_parts(self.addr, self.size as usize)
    }

    unsafe fn as_mut_slice(&self) -> &mut [u8] {
        from_raw_parts_mut(self.addr, self.size as usize)
    }
}

impl<'a> VolatileMemory for VolatileSlice<'a> {
    fn get_slice(&self, offset: u64, count: u64) -> Result<VolatileSlice> {
        let mem_end = calc_offset(offset, count)?;
        if mem_end > self.size {
            return Err(Error::OutOfBounds { addr: mem_end });
        }
        Ok(VolatileSlice {
            addr: (self.addr as u64 + offset) as *mut _,
            size: count,
            phantom: PhantomData,
        })
    }
}

/// A memory location that supports volatile access of a `T`.
///
/// # Examples
///
/// ```
/// # use data_model::VolatileRef;
/// let mut v = 5u32;
/// assert_eq!(v, 5);
/// let v_ref = unsafe { VolatileRef::new(&mut v as *mut u32) };
/// assert_eq!(v_ref.load(), 5);
/// v_ref.store(500);
/// assert_eq!(v, 500);
/// ```
#[derive(Debug)]
pub struct VolatileRef<'a, T: DataInit>
where
    T: 'a,
{
    addr: *mut T,
    phantom: PhantomData<&'a T>,
}

impl<'a, T: DataInit> VolatileRef<'a, T> {
    /// Creates a reference to raw memory that must support volatile access of `T` sized chunks.
    ///
    /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a
    /// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller
    /// must also guarantee that all other users of the given chunk of memory are using volatile
    /// accesses.
    pub unsafe fn new(addr: *mut T) -> VolatileRef<'a, T> {
        VolatileRef {
            addr,
            phantom: PhantomData,
        }
    }

    /// Gets the address of this ref's memory.
    pub fn as_ptr(&self) -> *mut T {
        self.addr
    }

    /// Gets the size of the memory this ref points to, which is always `size_of::<T>()`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::mem::size_of;
    /// # use data_model::VolatileRef;
    /// let v_ref = unsafe { VolatileRef::new(0 as *mut u32) };
    /// assert_eq!(v_ref.size(), size_of::<u32>() as u64);
    /// ```
    pub fn size(&self) -> u64 {
        size_of::<T>() as u64
    }

    /// Does a volatile write of the value `v` to the address of this ref.
    #[inline(always)]
    pub fn store(&self, v: T) {
        unsafe { write_volatile(self.addr, v) };
    }

    /// Does a volatile read of the value at the address of this ref.
    #[inline(always)]
    pub fn load(&self) -> T {
        // For the purposes of demonstrating why read_volatile is necessary, try replacing the
        // code in this function with the commented code below and running `cargo test --release`.
        // unsafe { *(self.addr as *const T) }
        unsafe { read_volatile(self.addr) }
    }

    /// Converts this `T` reference to a raw slice with the same size and address.
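    ///
    /// # Examples
    ///
    /// A short sketch, mirroring the `ref_to_slice` test below:
    ///
    /// ```
    /// # use std::mem::size_of;
    /// # use data_model::VolatileMemory;
    /// let mut mem = [0u8; 4];
    /// let mem_ref = &mut mem[..];
    /// let v_ref = mem_ref.get_ref::<u32>(0).unwrap();
    /// let vslice = v_ref.to_slice();
    /// assert_eq!(vslice.size(), size_of::<u32>() as u64);
    /// ```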
    pub fn to_slice(&self) -> VolatileSlice<'a> {
        unsafe { VolatileSlice::new(self.addr as *mut u8, size_of::<T>() as u64) }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use std::sync::Arc;
    use std::thread::{sleep, spawn};
    use std::time::Duration;

    #[derive(Clone)]
    struct VecMem {
        mem: Arc<Vec<u8>>,
    }

    impl VecMem {
        fn new(size: usize) -> VecMem {
            let mut mem = Vec::new();
            mem.resize(size, 0);
            VecMem { mem: Arc::new(mem) }
        }
    }

    impl VolatileMemory for VecMem {
        fn get_slice(&self, offset: u64, count: u64) -> Result<VolatileSlice> {
            let mem_end = calc_offset(offset, count)?;
            if mem_end > self.mem.len() as u64 {
                return Err(Error::OutOfBounds { addr: mem_end });
            }
            Ok(unsafe { VolatileSlice::new((self.mem.as_ptr() as u64 + offset) as *mut _, count) })
        }
    }

    #[test]
    fn ref_store() {
        let mut a = [0u8; 1];
        {
            let a_ref = &mut a[..];
            let v_ref = a_ref.get_ref(0).unwrap();
            v_ref.store(2u8);
        }
        assert_eq!(a[0], 2);
    }

    #[test]
    fn ref_load() {
        let mut a = [5u8; 1];
        {
            let a_ref = &mut a[..];
            let c = {
                let v_ref = a_ref.get_ref::<u8>(0).unwrap();
                assert_eq!(v_ref.load(), 5u8);
                v_ref
            };
            // To make sure we can take a v_ref out of the scope we made it in:
            c.load();
            // but not too far:
            // c
        } //.load()
        ;
    }

    #[test]
    fn ref_to_slice() {
        let mut a = [1u8; 5];
        let a_ref = &mut a[..];
        let v_ref = a_ref.get_ref(1).unwrap();
        v_ref.store(0x12345678u32);
        let ref_slice = v_ref.to_slice();
        assert_eq!(v_ref.as_ptr() as u64, ref_slice.as_ptr() as u64);
        assert_eq!(v_ref.size(), ref_slice.size());
    }

    #[test]
    fn observe_mutate() {
        let a = VecMem::new(1);
        let a_clone = a.clone();
        let v_ref = a.get_ref::<u8>(0).unwrap();
        v_ref.store(99);
        spawn(move || {
            sleep(Duration::from_millis(10));
            let clone_v_ref = a_clone.get_ref::<u8>(0).unwrap();
            clone_v_ref.store(0);
        });

        // Technically this is a race condition, but we have to observe the v_ref's value changing
        // somehow and this helps to ensure the sleep actually happens before the store rather than
        // being reordered by the compiler.
        assert_eq!(v_ref.load(), 99);

        // Granted we could have a machine that manages to perform this many volatile loads in the
        // amount of time the spawned thread sleeps, but the most likely reason the retry limit
        // will get reached is because v_ref.load() is not actually performing the required
        // volatile read or v_ref.store() is not doing a volatile write. A timer based solution
        // was avoided because that might use a syscall which could hint the optimizer to reload
        // v_ref's pointer regardless of volatile status. Note that we use a longer retry duration
        // for optimized builds.
        #[cfg(debug_assertions)]
        const RETRY_MAX: u64 = 500_000_000;
        #[cfg(not(debug_assertions))]
        const RETRY_MAX: u64 = 10_000_000_000;

        let mut retry = 0;
        while v_ref.load() == 99 && retry < RETRY_MAX {
            retry += 1;
        }

        assert_ne!(retry, RETRY_MAX, "maximum retry exceeded");
        assert_eq!(v_ref.load(), 0);
    }

    #[test]
    fn slice_size() {
        let a = VecMem::new(100);
        let s = a.get_slice(0, 27).unwrap();
        assert_eq!(s.size(), 27);

        let s = a.get_slice(34, 27).unwrap();
        assert_eq!(s.size(), 27);

        let s = s.get_slice(20, 5).unwrap();
        assert_eq!(s.size(), 5);
    }

    #[test]
    fn slice_overflow_error() {
        use std::u64::MAX;
        let a = VecMem::new(1);
        let res = a.get_slice(MAX, 1).unwrap_err();
        assert_eq!(
            res,
            Error::Overflow {
                base: MAX,
                offset: 1,
            }
        );
    }

    #[test]
    fn slice_oob_error() {
        let a = VecMem::new(100);
        a.get_slice(50, 50).unwrap();
        let res = a.get_slice(55, 50).unwrap_err();
        assert_eq!(res, Error::OutOfBounds { addr: 105 });
    }

    #[test]
    fn ref_overflow_error() {
        use std::u64::MAX;
        let a = VecMem::new(1);
        let res = a.get_ref::<u8>(MAX).unwrap_err();
        assert_eq!(
            res,
            Error::Overflow {
                base: MAX,
                offset: 1,
            }
        );
    }

    #[test]
    fn ref_oob_error() {
        let a = VecMem::new(100);
        a.get_ref::<u8>(99).unwrap();
        let res = a.get_ref::<u16>(99).unwrap_err();
        assert_eq!(res, Error::OutOfBounds { addr: 101 });
    }

    #[test]
    fn ref_oob_too_large() {
        let a = VecMem::new(3);
        let res = a.get_ref::<u32>(0).unwrap_err();
        assert_eq!(res, Error::OutOfBounds { addr: 4 });
    }
    }
}