sys_util/mmap.rs
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! The mmap module provides a safe interface to mmap memory and ensures unmap is called when the
//! mmap object leaves scope.

use std;
use std::io::{Read, Write};
use std::os::unix::io::AsRawFd;
use std::ptr::null_mut;

use libc;

use errno;

use data_model::volatile_memory::*;
use data_model::DataInit;

#[derive(Debug)]
pub enum Error {
    /// Requested memory out of range.
    InvalidAddress,
    /// Requested offset is out of range of `libc::off_t`.
    InvalidOffset,
    /// Requested memory range spans past the end of the region.
    InvalidRange(usize, usize),
    /// Couldn't read from the given source.
    ReadFromSource(std::io::Error),
    /// A system call returned the given error.
    SystemCallFailed(errno::Error),
    /// Writing to memory failed.
    WriteToMemory(std::io::Error),
    /// Reading from memory failed.
    ReadFromMemory(std::io::Error),
}
pub type Result<T> = std::result::Result<T, Error>;

/// Wraps an anonymous shared memory mapping in the current process.
#[derive(Debug)]
pub struct MemoryMapping {
    addr: *mut u8,
    size: usize,
}

// Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for MemoryMapping {}
unsafe impl Sync for MemoryMapping {}

impl MemoryMapping {
    /// Creates an anonymous shared mapping of `size` bytes.
    ///
    /// # Arguments
    /// * `size` - Size of memory region in bytes.
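    ///
    /// # Examples
    /// * Create a 1024 byte mapping and check its size.
    ///
    /// ```
    /// # use sys_util::MemoryMapping;
    /// let mem_map = MemoryMapping::new(1024).unwrap();
    /// assert_eq!(1024, mem_map.size());
    /// ```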
    pub fn new(size: usize) -> Result<MemoryMapping> {
        // This is safe because we are creating an anonymous mapping in a place not already used by
        // any other area in this process.
        let addr = unsafe {
            libc::mmap(
                null_mut(),
                size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
                -1,
                0,
            )
        };
        if addr == libc::MAP_FAILED {
            return Err(Error::SystemCallFailed(errno::Error::last()));
        }
        // This is safe because we call madvise with a valid address and size, and we check the
        // return value. We only warn about an error because failure here is not fatal to the mmap.
        if unsafe { libc::madvise(addr, size, libc::MADV_DONTDUMP) } == -1 {
            warn!(
                "failed madvise(MADV_DONTDUMP) on mmap: {:?}",
                errno::Error::last()
            );
        }
        Ok(MemoryMapping {
            addr: addr as *mut u8,
            size,
        })
    }

    /// Maps the first `size` bytes of the given `fd`.
    ///
    /// # Arguments
    /// * `fd` - File descriptor to mmap from.
    /// * `size` - Size of memory region in bytes.
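    ///
    /// # Examples
    /// * Map 1024 bytes of `/dev/zero` (any mappable file descriptor works).
    ///
    /// ```
    /// # use sys_util::MemoryMapping;
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # fn test_map_zero() -> Result<(), ()> {
    /// let file = File::open(Path::new("/dev/zero")).map_err(|_| ())?;
    /// let mem_map = MemoryMapping::from_fd(&file, 1024).map_err(|_| ())?;
    /// # Ok(())
    /// # }
    /// ```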
    pub fn from_fd(fd: &AsRawFd, size: usize) -> Result<MemoryMapping> {
        MemoryMapping::from_fd_offset(fd, size, 0)
    }

    /// Maps the `size` bytes starting at `offset` bytes of the given `fd`.
    ///
    /// # Arguments
    /// * `fd` - File descriptor to mmap from.
    /// * `size` - Size of memory region in bytes.
    /// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
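    ///
    /// # Examples
    /// * Map 1024 bytes at offset 4096 of `/dev/zero`. Note that `mmap` requires
    ///   the offset to be a multiple of the page size.
    ///
    /// ```
    /// # use sys_util::MemoryMapping;
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # fn test_map_offset() -> Result<(), ()> {
    /// let file = File::open(Path::new("/dev/zero")).map_err(|_| ())?;
    /// let mem_map = MemoryMapping::from_fd_offset(&file, 1024, 4096).map_err(|_| ())?;
    /// # Ok(())
    /// # }
    /// ```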
    pub fn from_fd_offset(fd: &AsRawFd, size: usize, offset: usize) -> Result<MemoryMapping> {
        if offset > libc::off_t::max_value() as usize {
            return Err(Error::InvalidOffset);
        }
        // This is safe because we are creating a mapping in a place not already used by any other
        // area in this process.
        let addr = unsafe {
            libc::mmap(
                null_mut(),
                size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_SHARED,
                fd.as_raw_fd(),
                offset as libc::off_t,
            )
        };
        if addr == libc::MAP_FAILED {
            return Err(Error::SystemCallFailed(errno::Error::last()));
        }
        // This is safe because we call madvise with a valid address and size, and we check the
        // return value. We only warn about an error because failure here is not fatal to the mmap.
        if unsafe { libc::madvise(addr, size, libc::MADV_DONTDUMP) } == -1 {
            warn!(
                "failed madvise(MADV_DONTDUMP) on mmap: {:?}",
                errno::Error::last()
            );
        }
        Ok(MemoryMapping {
            addr: addr as *mut u8,
            size,
        })
    }

    /// Returns a pointer to the beginning of the memory region. Should only be
    /// used for passing this region to ioctls for setting guest memory.
    pub fn as_ptr(&self) -> *mut u8 {
        self.addr
    }

    /// Returns the size of the memory region in bytes.
    pub fn size(&self) -> usize {
        self.size
    }

    /// Writes a slice to the memory region at the specified offset.
    /// Returns the number of bytes written. The number of bytes written can
    /// be less than the length of the slice if there isn't enough room in the
    /// memory region.
    ///
    /// # Examples
    /// * Write a slice at offset 256.
    ///
    /// ```
    /// # use sys_util::MemoryMapping;
    /// # let mut mem_map = MemoryMapping::new(1024).unwrap();
    /// let res = mem_map.write_slice(&[1, 2, 3, 4, 5], 256);
    /// assert!(res.is_ok());
    /// assert_eq!(res.unwrap(), 5);
    /// ```
    pub fn write_slice(&self, buf: &[u8], offset: usize) -> Result<usize> {
        if offset >= self.size {
            return Err(Error::InvalidAddress);
        }
        unsafe {
            // Guest memory can't strictly be modeled as a slice because it is
            // volatile. Writing to it with what compiles down to a memcpy
            // won't hurt anything as long as we get the bounds checks right.
            let mut slice: &mut [u8] = &mut self.as_mut_slice()[offset..];
            Ok(slice.write(buf).map_err(Error::WriteToMemory)?)
        }
    }

    /// Reads data from the memory region at the specified offset into a slice.
    /// Returns the number of bytes read. The number of bytes read can
    /// be less than the length of the slice if there aren't enough bytes
    /// between the offset and the end of the memory region.
    ///
    /// # Examples
    /// * Read a slice of size 16 at offset 256.
    ///
    /// ```
    /// # use sys_util::MemoryMapping;
    /// # let mut mem_map = MemoryMapping::new(1024).unwrap();
    /// let buf = &mut [0u8; 16];
    /// let res = mem_map.read_slice(buf, 256);
    /// assert!(res.is_ok());
    /// assert_eq!(res.unwrap(), 16);
    /// ```
    pub fn read_slice(&self, mut buf: &mut [u8], offset: usize) -> Result<usize> {
        if offset >= self.size {
            return Err(Error::InvalidAddress);
        }
        unsafe {
            // Guest memory can't strictly be modeled as a slice because it is
            // volatile. Reading from it with what compiles down to a memcpy
            // won't hurt anything as long as we get the bounds checks right.
            let slice: &[u8] = &self.as_slice()[offset..];
            Ok(buf.write(slice).map_err(Error::ReadFromMemory)?)
        }
    }

    /// Writes an object to the memory region at the specified offset.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// # Examples
    /// * Write a u64 at offset 16.
    ///
    /// ```
    /// # use sys_util::MemoryMapping;
    /// # let mut mem_map = MemoryMapping::new(1024).unwrap();
    /// let res = mem_map.write_obj(55u64, 16);
    /// assert!(res.is_ok());
    /// ```
    pub fn write_obj<T: DataInit>(&self, val: T, offset: usize) -> Result<()> {
        unsafe {
            // Guest memory can't strictly be modeled as a slice because it is
            // volatile. Writing to it with what compiles down to a memcpy
            // won't hurt anything as long as we get the bounds checks right.
            self.range_end(offset, std::mem::size_of::<T>())?;
            std::ptr::write_volatile(&mut self.as_mut_slice()[offset..] as *mut _ as *mut T, val);
            Ok(())
        }
    }

    /// Reads an object from the memory region at the given offset.
    /// Reading from a volatile area isn't strictly safe as it could change
    /// mid-read. However, as long as the type T is plain old data and can
    /// handle random initialization, everything will be OK.
    ///
    /// # Examples
    /// * Read a u64 written to offset 32.
    ///
    /// ```
    /// # use sys_util::MemoryMapping;
    /// # let mut mem_map = MemoryMapping::new(1024).unwrap();
    /// let res = mem_map.write_obj(55u64, 32);
    /// assert!(res.is_ok());
    /// let num: u64 = mem_map.read_obj(32).unwrap();
    /// assert_eq!(55, num);
    /// ```
    pub fn read_obj<T: DataInit>(&self, offset: usize) -> Result<T> {
        self.range_end(offset, std::mem::size_of::<T>())?;
        unsafe {
            // This is safe because by definition Copy types can have their bits
            // set arbitrarily and still be valid.
            Ok(std::ptr::read_volatile(
                &self.as_slice()[offset..] as *const _ as *const T,
            ))
        }
    }

    /// Reads data from a readable object like a File and writes it to guest memory.
    ///
    /// # Arguments
    /// * `mem_offset` - Begin writing memory at this offset.
    /// * `src` - Read from `src` to memory.
    /// * `count` - Read `count` bytes from `src` to memory.
    ///
    /// # Examples
    ///
    /// * Read bytes from /dev/urandom
    ///
    /// ```
    /// # use sys_util::MemoryMapping;
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # fn test_read_random() -> Result<u32, ()> {
    /// # let mut mem_map = MemoryMapping::new(1024).unwrap();
    /// let mut file = File::open(Path::new("/dev/urandom")).map_err(|_| ())?;
    /// mem_map.read_to_memory(32, &mut file, 128).map_err(|_| ())?;
    /// let rand_val: u32 = mem_map.read_obj(40).map_err(|_| ())?;
    /// # Ok(rand_val)
    /// # }
    /// ```
    pub fn read_to_memory<F>(&self, mem_offset: usize, src: &mut F, count: usize) -> Result<()>
    where
        F: Read,
    {
        let mem_end = self
            .range_end(mem_offset, count)
            .map_err(|_| Error::InvalidRange(mem_offset, count))?;
        unsafe {
            // It is safe to overwrite the volatile memory. Accessing the guest
            // memory as a mutable slice is OK because nothing assumes another
            // thread won't change what is loaded.
            let dst = &mut self.as_mut_slice()[mem_offset..mem_end];
            src.read_exact(dst).map_err(Error::ReadFromSource)?;
        }
        Ok(())
    }

    /// Writes data from memory to a writable object.
    ///
    /// # Arguments
    /// * `mem_offset` - Begin reading memory from this offset.
    /// * `dst` - Write from memory to `dst`.
    /// * `count` - Write `count` bytes from memory to `dst`.
    ///
    /// # Examples
    ///
    /// * Write 128 bytes to /dev/null
    ///
    /// ```
    /// # use sys_util::MemoryMapping;
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # fn test_write_null() -> Result<(), ()> {
    /// # let mut mem_map = MemoryMapping::new(1024).unwrap();
    /// let mut file = File::create(Path::new("/dev/null")).map_err(|_| ())?;
    /// mem_map.write_from_memory(32, &mut file, 128).map_err(|_| ())?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn write_from_memory<F>(&self, mem_offset: usize, dst: &mut F, count: usize) -> Result<()>
    where
        F: Write,
    {
        let mem_end = self
            .range_end(mem_offset, count)
            .map_err(|_| Error::InvalidRange(mem_offset, count))?;
        unsafe {
            // It is safe to read from volatile memory. Accessing the guest
            // memory as a slice is OK because nothing assumes another thread
            // won't change what is loaded.
            let src = &self.as_slice()[mem_offset..mem_end];
            dst.write_all(src).map_err(Error::ReadFromMemory)?;
        }
        Ok(())
    }

    /// Uses madvise to tell the kernel to remove the specified range. Subsequent reads
    /// to the pages in the range will return zero bytes.
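    ///
    /// # Examples
    /// * Remove a written value and observe that the page reads back as zeros.
    ///
    /// ```
    /// # use sys_util::MemoryMapping;
    /// # let mem_map = MemoryMapping::new(1024).unwrap();
    /// mem_map.write_obj(55u64, 64).unwrap();
    /// mem_map.remove_range(0, 1024).unwrap();
    /// assert_eq!(mem_map.read_obj::<u64>(64).unwrap(), 0);
    /// ```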
    pub fn remove_range(&self, mem_offset: usize, count: usize) -> Result<()> {
        self.range_end(mem_offset, count)
            .map_err(|_| Error::InvalidRange(mem_offset, count))?;
        let ret = unsafe {
            // madvising away the region is the same as the guest changing it.
            // Next time it is read, it may return zero pages.
            libc::madvise(
                (self.addr as usize + mem_offset) as *mut _,
                count,
                libc::MADV_REMOVE,
            )
        };
        if ret < 0 {
            Err(Error::SystemCallFailed(errno::Error::last()))
        } else {
            Ok(())
        }
    }

    unsafe fn as_slice(&self) -> &[u8] {
        // This is safe because we mapped the area at addr ourselves, so this slice will not
        // overflow. However, it is possible to alias.
        std::slice::from_raw_parts(self.addr, self.size)
    }

    unsafe fn as_mut_slice(&self) -> &mut [u8] {
        // This is safe because we mapped the area at addr ourselves, so this slice will not
        // overflow. However, it is possible to alias.
        std::slice::from_raw_parts_mut(self.addr, self.size)
    }

    // Check that offset+count is valid and return the sum.
    fn range_end(&self, offset: usize, count: usize) -> Result<usize> {
        let mem_end = offset.checked_add(count).ok_or(Error::InvalidAddress)?;
        if mem_end > self.size() {
            return Err(Error::InvalidAddress);
        }
        Ok(mem_end)
    }
}

impl VolatileMemory for MemoryMapping {
    fn get_slice(&self, offset: u64, count: u64) -> VolatileMemoryResult<VolatileSlice> {
        let mem_end = calc_offset(offset, count)?;
        if mem_end > self.size as u64 {
            return Err(VolatileMemoryError::OutOfBounds { addr: mem_end });
        }

        // Safe because we checked that offset + count was within our range and we only ever hand
        // out volatile accessors.
        Ok(unsafe { VolatileSlice::new((self.addr as usize + offset as usize) as *mut _, count) })
    }
}

impl Drop for MemoryMapping {
    fn drop(&mut self) {
        // This is safe because we mmap the area at addr ourselves, and nobody
        // else is holding a reference to it.
        unsafe {
            libc::munmap(self.addr as *mut libc::c_void, self.size);
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use data_model::{VolatileMemory, VolatileMemoryError};
    use std::os::unix::io::FromRawFd;

    #[test]
    fn basic_map() {
        let m = MemoryMapping::new(1024).unwrap();
        assert_eq!(1024, m.size());
    }

    #[test]
    fn map_invalid_size() {
        let res = MemoryMapping::new(0).unwrap_err();
        if let Error::SystemCallFailed(e) = res {
            assert_eq!(e.errno(), libc::EINVAL);
        } else {
            panic!("unexpected error: {:?}", res);
        }
    }

    #[test]
    fn map_invalid_fd() {
        let fd = unsafe { std::fs::File::from_raw_fd(-1) };
        let res = MemoryMapping::from_fd(&fd, 1024).unwrap_err();
        if let Error::SystemCallFailed(e) = res {
            assert_eq!(e.errno(), libc::EBADF);
        } else {
            panic!("unexpected error: {:?}", res);
        }
    }

    #[test]
    fn test_write_past_end() {
        let m = MemoryMapping::new(5).unwrap();
        let res = m.write_slice(&[1, 2, 3, 4, 5, 6], 0);
        assert!(res.is_ok());
        assert_eq!(res.unwrap(), 5);
    }
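
    // Round-trip a value through write_obj/read_obj. This assumes u16
    // implements DataInit, which slice_store below already relies on.
    #[test]
    fn obj_read_and_write() {
        let m = MemoryMapping::new(5).unwrap();
        // A 2 byte object at offset 4 extends past the 5 byte region.
        assert!(m.write_obj(55u16, 4).is_err());
        m.write_obj(55u16, 3).unwrap();
        assert_eq!(m.read_obj::<u16>(3).unwrap(), 55u16);
    }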

    #[test]
    fn slice_size() {
        let m = MemoryMapping::new(5).unwrap();
        let s = m.get_slice(2, 3).unwrap();
        assert_eq!(s.size(), 3);
    }

    #[test]
    fn slice_addr() {
        let m = MemoryMapping::new(5).unwrap();
        let s = m.get_slice(2, 3).unwrap();
        assert_eq!(s.as_ptr(), unsafe { m.as_ptr().offset(2) });
    }

    #[test]
    fn slice_store() {
        let m = MemoryMapping::new(5).unwrap();
        let r = m.get_ref(2).unwrap();
        r.store(9u16);
        assert_eq!(m.read_obj::<u16>(2).unwrap(), 9);
    }

    #[test]
    fn slice_overflow_error() {
        let m = MemoryMapping::new(5).unwrap();
        let res = m.get_slice(std::u64::MAX, 3).unwrap_err();
        assert_eq!(
            res,
            VolatileMemoryError::Overflow {
                base: std::u64::MAX,
                offset: 3,
            }
        );
    }

    #[test]
    fn slice_oob_error() {
        let m = MemoryMapping::new(5).unwrap();
        let res = m.get_slice(3, 3).unwrap_err();
        assert_eq!(res, VolatileMemoryError::OutOfBounds { addr: 6 });
    }
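
    // A sketch of a remove_range round trip; it relies on MADV_REMOVE zeroing
    // the shmem-backed pages, as remove_range's documentation describes.
    #[test]
    fn mem_remove_range() {
        let m = MemoryMapping::new(1024).unwrap();
        m.write_obj(0x1234_5678u32, 128).unwrap();
        m.remove_range(0, 1024).unwrap();
        assert_eq!(m.read_obj::<u32>(128).unwrap(), 0);
    }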

    #[test]
    fn from_fd_offset_invalid() {
        let fd = unsafe { std::fs::File::from_raw_fd(-1) };
        let res = MemoryMapping::from_fd_offset(&fd, 4096, (libc::off_t::max_value() as usize) + 1)
            .unwrap_err();
        match res {
            Error::InvalidOffset => {}
            e => panic!("unexpected error: {:?}", e),
        }
    }
}
491}