// virtio_drivers/device/blk.rs
1//! Driver for VirtIO block devices.
2
3use crate::config::{ReadOnly, read_config};
4use crate::hal::Hal;
5use crate::queue::VirtQueue;
6use crate::transport::{InterruptStatus, Transport};
7use crate::{Error, Result};
8use bitflags::bitflags;
9use log::info;
10use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};
11
/// The index of the single virtqueue this driver uses for all requests.
const QUEUE: u16 = 0;
/// The number of descriptors in the request queue.
const QUEUE_SIZE: u16 = 16;
/// The set of feature bits this driver is willing to negotiate with the device.
const SUPPORTED_FEATURES: BlkFeature = BlkFeature::RO
    .union(BlkFeature::FLUSH)
    .union(BlkFeature::RING_INDIRECT_DESC)
    .union(BlkFeature::RING_EVENT_IDX)
    .union(BlkFeature::VERSION_1);
19
/// Driver for a VirtIO block device.
///
/// This is a simple virtual block device, e.g. disk.
///
/// Read and write requests (and other exotic requests) are placed in the queue and serviced
/// (probably out of order) by the device except where noted.
///
/// # Example
///
/// ```
/// # use virtio_drivers::{Error, Hal};
/// # use virtio_drivers::transport::Transport;
/// use virtio_drivers::device::blk::{VirtIOBlk, SECTOR_SIZE};
///
/// # fn example<HalImpl: Hal, T: Transport>(transport: T) -> Result<(), Error> {
/// let mut disk = VirtIOBlk::<HalImpl, _>::new(transport)?;
///
/// println!("VirtIO block device: {} kB", disk.capacity() * SECTOR_SIZE as u64 / 2);
///
/// // Read sector 0 and then copy it to sector 1.
/// let mut buf = [0; SECTOR_SIZE];
/// disk.read_blocks(0, &mut buf)?;
/// disk.write_blocks(1, &buf)?;
/// # Ok(())
/// # }
/// ```
pub struct VirtIOBlk<H: Hal, T: Transport> {
    // The transport (e.g. MMIO or PCI) used to communicate with the device.
    transport: T,
    // The single virtqueue shared with the device for all requests.
    queue: VirtQueue<H, { QUEUE_SIZE as usize }>,
    // Device capacity in 512-byte sectors, read from the config space during init.
    capacity: u64,
    // The feature bits agreed with the device during initialisation.
    negotiated_features: BlkFeature,
}
52
impl<H: Hal, T: Transport> VirtIOBlk<H, T> {
    /// Create a new VirtIO-Blk driver.
    ///
    /// Negotiates features, reads the capacity from the config space, sets up the request
    /// virtqueue and completes device initialisation.
    pub fn new(mut transport: T) -> Result<Self> {
        let negotiated_features = transport.begin_init(SUPPORTED_FEATURES);

        // Read configuration space.
        // The 64-bit capacity is split across two 32-bit config fields; `read_consistent`
        // retries the closure until it observes a consistent snapshot of the config space.
        let capacity = transport.read_consistent(|| {
            Ok((read_config!(transport, BlkConfig, capacity_low)? as u64)
                | ((read_config!(transport, BlkConfig, capacity_high)? as u64) << 32))
        })?;
        // Capacity is in 512-byte sectors, so dividing by 2 gives KiB.
        info!("found a block device of size {}KB", capacity / 2);

        let queue = VirtQueue::new(
            &mut transport,
            QUEUE,
            negotiated_features.contains(BlkFeature::RING_INDIRECT_DESC),
            negotiated_features.contains(BlkFeature::RING_EVENT_IDX),
        )?;
        transport.finish_init();

        Ok(VirtIOBlk {
            transport,
            queue,
            capacity,
            negotiated_features,
        })
    }

    /// Gets the capacity of the block device, in 512 byte ([`SECTOR_SIZE`]) sectors.
    pub fn capacity(&self) -> u64 {
        self.capacity
    }

    /// Returns true if the block device is read-only, or false if it allows writes.
    pub fn readonly(&self) -> bool {
        self.negotiated_features.contains(BlkFeature::RO)
    }

    /// Acknowledges a pending interrupt, if any.
    ///
    /// Returns the interrupt status, indicating which interrupt (if any) was acknowledged.
    pub fn ack_interrupt(&mut self) -> InterruptStatus {
        self.transport.ack_interrupt()
    }

    /// Enables interrupts from the device.
    pub fn enable_interrupts(&mut self) {
        self.queue.set_dev_notify(true);
    }

    /// Disables interrupts from the device.
    pub fn disable_interrupts(&mut self) {
        self.queue.set_dev_notify(false);
    }

    /// Sends the given request to the device and waits for a response, with no extra data.
    fn request(&mut self, request: BlkReq) -> Result {
        let mut resp = BlkResp::default();
        self.queue.add_notify_wait_pop(
            &[request.as_bytes()],
            &mut [resp.as_mut_bytes()],
            &mut self.transport,
        )?;
        // Convert the device-written status byte into a `Result`.
        resp.status.into()
    }

    /// Sends the given request to the device and waits for a response, including the given data.
    ///
    /// `data` is a device-writable buffer that the device fills in (e.g. sector contents).
    fn request_read(&mut self, request: BlkReq, data: &mut [u8]) -> Result {
        let mut resp = BlkResp::default();
        self.queue.add_notify_wait_pop(
            &[request.as_bytes()],
            &mut [data, resp.as_mut_bytes()],
            &mut self.transport,
        )?;
        resp.status.into()
    }

    /// Sends the given request and data to the device and waits for a response.
    ///
    /// `data` is a device-readable buffer containing the payload to write.
    fn request_write(&mut self, request: BlkReq, data: &[u8]) -> Result {
        let mut resp = BlkResp::default();
        self.queue.add_notify_wait_pop(
            &[request.as_bytes(), data],
            &mut [resp.as_mut_bytes()],
            &mut self.transport,
        )?;
        resp.status.into()
    }

    /// Requests the device to flush any pending writes to storage.
    ///
    /// This will be ignored if the device doesn't support the `VIRTIO_BLK_F_FLUSH` feature.
    pub fn flush(&mut self) -> Result {
        if self.negotiated_features.contains(BlkFeature::FLUSH) {
            self.request(BlkReq {
                type_: ReqType::Flush,
                ..Default::default()
            })
        } else {
            Ok(())
        }
    }

    /// Gets the device ID.
    ///
    /// The ID is written as ASCII into the given buffer, which must be 20 bytes long, and the used
    /// length returned.
    pub fn device_id(&mut self, id: &mut [u8; 20]) -> Result<usize> {
        self.request_read(
            BlkReq {
                type_: ReqType::GetId,
                ..Default::default()
            },
            id,
        )?;

        // The ID is NUL-padded; report the length up to the first NUL byte (or the full 20).
        let length = id.iter().position(|&x| x == 0).unwrap_or(20);
        Ok(length)
    }

    /// Reads one or more blocks into the given buffer.
    ///
    /// The buffer length must be a non-zero multiple of [`SECTOR_SIZE`].
    ///
    /// Blocks until the read completes or there is an error.
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or its length is not a multiple of [`SECTOR_SIZE`].
    pub fn read_blocks(&mut self, block_id: usize, buf: &mut [u8]) -> Result {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        self.request_read(
            BlkReq {
                type_: ReqType::In,
                reserved: 0,
                sector: block_id as u64,
            },
            buf,
        )
    }

    /// Submits a request to read one or more blocks, but returns immediately without waiting for
    /// the read to complete.
    ///
    /// # Arguments
    ///
    /// * `block_id` - The identifier of the first block to read.
    /// * `req` - A buffer which the driver can use for the request to send to the device. The
    ///   contents don't matter as `read_blocks_nb` will initialise it, but like the other buffers
    ///   it needs to be valid (and not otherwise used) until the corresponding
    ///   `complete_read_blocks` call.
    /// * `buf` - The buffer in memory into which the block should be read. Its length must be a
    ///   non-zero multiple of [`SECTOR_SIZE`].
    /// * `resp` - A mutable reference to a variable provided by the caller
    ///   to contain the status of the request. The caller can safely
    ///   read the variable only after the request is complete.
    ///
    /// # Usage
    ///
    /// It will submit request to the VirtIO block device and return a token identifying
    /// the position of the first Descriptor in the chain. If there are not enough
    /// Descriptors to allocate, then it returns [`Error::QueueFull`].
    ///
    /// The caller can then call `peek_used` with the returned token to check whether the device has
    /// finished handling the request. Once it has, the caller must call `complete_read_blocks` with
    /// the same buffers before reading the response.
    ///
    /// ```
    /// # use virtio_drivers::{Error, Hal};
    /// # use virtio_drivers::device::blk::VirtIOBlk;
    /// # use virtio_drivers::transport::Transport;
    /// use virtio_drivers::device::blk::{BlkReq, BlkResp, RespStatus};
    ///
    /// # fn example<H: Hal, T: Transport>(blk: &mut VirtIOBlk<H, T>) -> Result<(), Error> {
    /// let mut request = BlkReq::default();
    /// let mut buffer = [0; 512];
    /// let mut response = BlkResp::default();
    /// let token = unsafe { blk.read_blocks_nb(42, &mut request, &mut buffer, &mut response) }?;
    ///
    /// // Wait for an interrupt to tell us that the request completed...
    /// assert_eq!(blk.peek_used(), Some(token));
    ///
    /// unsafe {
    ///   blk.complete_read_blocks(token, &request, &mut buffer, &mut response)?;
    /// }
    /// if response.status() == RespStatus::OK {
    ///   println!("Successfully read block.");
    /// } else {
    ///   println!("Error {:?} reading block.", response.status());
    /// }
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # Safety
    ///
    /// `req`, `buf` and `resp` are still borrowed by the underlying VirtIO block device even after
    /// this method returns. Thus, it is the caller's responsibility to guarantee that they are not
    /// accessed before the request is completed in order to avoid data races.
    pub unsafe fn read_blocks_nb(
        &mut self,
        block_id: usize,
        req: &mut BlkReq,
        buf: &mut [u8],
        resp: &mut BlkResp,
    ) -> Result<u16> {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        *req = BlkReq {
            type_: ReqType::In,
            reserved: 0,
            sector: block_id as u64,
        };
        // SAFETY: The caller promises that `req`, `buf` and `resp` are not accessed before the
        // request is completed.
        let token = unsafe {
            self.queue
                .add(&[req.as_bytes()], &mut [buf, resp.as_mut_bytes()])?
        };
        // Only notify the device if it hasn't suppressed notifications.
        if self.queue.should_notify() {
            self.transport.notify(QUEUE);
        }
        Ok(token)
    }

    /// Completes a read operation which was started by `read_blocks_nb`.
    ///
    /// # Safety
    ///
    /// The same buffers (`req`, `buf` and `resp`) must be passed in again as were passed to
    /// `read_blocks_nb` when it returned the token.
    pub unsafe fn complete_read_blocks(
        &mut self,
        token: u16,
        req: &BlkReq,
        buf: &mut [u8],
        resp: &mut BlkResp,
    ) -> Result<()> {
        // SAFETY: The caller promises that `req`, `buf` and `resp` are the same that were passed to
        // the corresponding `read_blocks_nb` call which added them to the queue.
        unsafe {
            self.queue
                .pop_used(token, &[req.as_bytes()], &mut [buf, resp.as_mut_bytes()])?;
        }
        resp.status.into()
    }

    /// Writes the contents of the given buffer to a block or blocks.
    ///
    /// The buffer length must be a non-zero multiple of [`SECTOR_SIZE`].
    ///
    /// Blocks until the write is complete or there is an error.
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or its length is not a multiple of [`SECTOR_SIZE`].
    pub fn write_blocks(&mut self, block_id: usize, buf: &[u8]) -> Result {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        self.request_write(
            BlkReq {
                type_: ReqType::Out,
                sector: block_id as u64,
                ..Default::default()
            },
            buf,
        )
    }

    /// Submits a request to write one or more blocks, but returns immediately without waiting for
    /// the write to complete.
    ///
    /// # Arguments
    ///
    /// * `block_id` - The identifier of the first block to write.
    /// * `req` - A buffer which the driver can use for the request to send to the device. The
    ///   contents don't matter as `write_blocks_nb` will initialise it, but like the other buffers
    ///   it needs to be valid (and not otherwise used) until the corresponding
    ///   `complete_write_blocks` call.
    /// * `buf` - The buffer in memory containing the data to write to the blocks. Its length must
    ///   be a non-zero multiple of [`SECTOR_SIZE`].
    /// * `resp` - A mutable reference to a variable provided by the caller
    ///   to contain the status of the request. The caller can safely
    ///   read the variable only after the request is complete.
    ///
    /// # Usage
    ///
    /// See [VirtIOBlk::read_blocks_nb].
    ///
    /// # Safety
    ///
    /// See  [VirtIOBlk::read_blocks_nb].
    pub unsafe fn write_blocks_nb(
        &mut self,
        block_id: usize,
        req: &mut BlkReq,
        buf: &[u8],
        resp: &mut BlkResp,
    ) -> Result<u16> {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        *req = BlkReq {
            type_: ReqType::Out,
            reserved: 0,
            sector: block_id as u64,
        };
        // SAFETY: The caller promises that `req`, `buf` and `resp` are not accessed before the
        // request is completed.
        let token = unsafe {
            self.queue
                .add(&[req.as_bytes(), buf], &mut [resp.as_mut_bytes()])?
        };
        // Only notify the device if it hasn't suppressed notifications.
        if self.queue.should_notify() {
            self.transport.notify(QUEUE);
        }
        Ok(token)
    }

    /// Completes a write operation which was started by `write_blocks_nb`.
    ///
    /// # Safety
    ///
    /// The same buffers (`req`, `buf` and `resp`) must be passed in again as were passed to
    /// `write_blocks_nb` when it returned the token.
    pub unsafe fn complete_write_blocks(
        &mut self,
        token: u16,
        req: &BlkReq,
        buf: &[u8],
        resp: &mut BlkResp,
    ) -> Result<()> {
        // SAFETY: The caller promises that `req`, `buf` and `resp` are the same that were passed to
        // the corresponding `write_blocks_nb` call which added them to the queue.
        unsafe {
            self.queue
                .pop_used(token, &[req.as_bytes(), buf], &mut [resp.as_mut_bytes()])?;
        }
        resp.status.into()
    }

    /// Fetches the token of the next completed request from the used ring and returns it, without
    /// removing it from the used ring. If there are no pending completed requests returns `None`.
    pub fn peek_used(&mut self) -> Option<u16> {
        self.queue.peek_used()
    }

    /// Returns the size of the device's VirtQueue.
    ///
    /// This can be used to tell the caller how many channels to monitor on.
    pub fn virt_queue_size(&self) -> u16 {
        QUEUE_SIZE
    }
}
397
398impl<H: Hal, T: Transport> Drop for VirtIOBlk<H, T> {
399    fn drop(&mut self) {
400        // Clear any pointers pointing to DMA regions, so the device doesn't try to access them
401        // after they have been freed.
402        self.transport.queue_unset(QUEUE);
403    }
404}
405
/// The VirtIO block device configuration space layout (only the fields this driver may read).
#[derive(FromBytes, Immutable, IntoBytes)]
#[repr(C)]
struct BlkConfig {
    /// Number of 512 Bytes sectors
    capacity_low: ReadOnly<u32>,
    /// High 32 bits of the sector count.
    capacity_high: ReadOnly<u32>,
    /// Maximum size of any single segment.
    size_max: ReadOnly<u32>,
    /// Maximum number of segments in a request.
    seg_max: ReadOnly<u32>,
    // Disk-style geometry fields.
    cylinders: ReadOnly<u16>,
    heads: ReadOnly<u8>,
    sectors: ReadOnly<u8>,
    /// Block size of the disk.
    blk_size: ReadOnly<u32>,
    // Topology fields (optimal I/O alignment information).
    physical_block_exp: ReadOnly<u8>,
    alignment_offset: ReadOnly<u8>,
    min_io_size: ReadOnly<u16>,
    opt_io_size: ReadOnly<u32>,
    // ... ignored
}
424
/// A VirtIO block device request.
#[repr(C)]
#[derive(Debug, Immutable, IntoBytes, KnownLayout)]
pub struct BlkReq {
    // The type of request (read, write, flush, get ID, ...).
    type_: ReqType,
    // Reserved/padding; always written as 0 by this driver.
    reserved: u32,
    // The first 512-byte sector the request operates on.
    sector: u64,
}
433
434impl Default for BlkReq {
435    fn default() -> Self {
436        Self {
437            type_: ReqType::In,
438            reserved: 0,
439            sector: 0,
440        }
441    }
442}
443
/// Response of a VirtIOBlk request.
#[repr(C)]
#[derive(Debug, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct BlkResp {
    // Status byte written by the device when the request completes.
    status: RespStatus,
}
450
impl BlkResp {
    /// Return the status of a VirtIOBlk request.
    ///
    /// Only meaningful once the corresponding request has completed; until then it holds the
    /// [`RespStatus::NOT_READY`] placeholder set by [`BlkResp::default`].
    pub fn status(&self) -> RespStatus {
        self.status
    }
}
457
/// Request type codes for the first field of a [`BlkReq`].
#[repr(u32)]
#[derive(Debug, Immutable, IntoBytes, KnownLayout)]
enum ReqType {
    /// Read sectors from the device into the driver's buffer.
    In = 0,
    /// Write sectors from the driver's buffer to the device.
    Out = 1,
    /// Flush the device's write cache.
    Flush = 4,
    /// Get the device ID string.
    GetId = 8,
    /// Get storage lifetime information.
    GetLifetime = 10,
    /// Discard a range of sectors.
    Discard = 11,
    /// Write zeroes to a range of sectors.
    WriteZeroes = 13,
    /// Securely erase a range of sectors.
    SecureErase = 14,
}
470
/// Status of a VirtIOBlk request.
///
/// A newtype over the raw status byte written by the device; known values are the associated
/// constants below, but devices may write other values.
#[repr(transparent)]
#[derive(Copy, Clone, Debug, Eq, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq)]
pub struct RespStatus(u8);
475
impl RespStatus {
    /// The request completed successfully.
    pub const OK: RespStatus = RespStatus(0);
    /// The request failed with an I/O error.
    pub const IO_ERR: RespStatus = RespStatus(1);
    /// The request type is not supported by the device.
    pub const UNSUPPORTED: RespStatus = RespStatus(2);
    /// The device has not yet written a status for the request.
    pub const NOT_READY: RespStatus = RespStatus(3);
}
486
487impl From<RespStatus> for Result {
488    fn from(status: RespStatus) -> Self {
489        match status {
490            RespStatus::OK => Ok(()),
491            RespStatus::IO_ERR => Err(Error::IoError),
492            RespStatus::UNSUPPORTED => Err(Error::Unsupported),
493            RespStatus::NOT_READY => Err(Error::NotReady),
494            _ => Err(Error::IoError),
495        }
496    }
497}
498
499impl Default for BlkResp {
500    fn default() -> Self {
501        BlkResp {
502            status: RespStatus::NOT_READY,
503        }
504    }
505}
506
/// The standard sector size of a VirtIO block device. Data is read and written in multiples of this
/// size.
pub const SECTOR_SIZE: usize = 512;
510
bitflags! {
    /// Feature bits for a VirtIO block device, as negotiated during initialisation.
    #[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
    struct BlkFeature: u64 {
        /// Device supports request barriers. (legacy)
        const BARRIER       = 1 << 0;
        /// Maximum size of any single segment is in `size_max`.
        const SIZE_MAX      = 1 << 1;
        /// Maximum number of segments in a request is in `seg_max`.
        const SEG_MAX       = 1 << 2;
        /// Disk-style geometry specified in geometry.
        const GEOMETRY      = 1 << 4;
        /// Device is read-only.
        const RO            = 1 << 5;
        /// Block size of disk is in `blk_size`.
        const BLK_SIZE      = 1 << 6;
        /// Device supports scsi packet commands. (legacy)
        const SCSI          = 1 << 7;
        /// Cache flush command support.
        const FLUSH         = 1 << 9;
        /// Device exports information on optimal I/O alignment.
        const TOPOLOGY      = 1 << 10;
        /// Device can toggle its cache between writeback and writethrough modes.
        const CONFIG_WCE    = 1 << 11;
        /// Device supports multiqueue.
        const MQ            = 1 << 12;
        /// Device can support discard command, maximum discard sectors size in
        /// `max_discard_sectors` and maximum discard segment number in
        /// `max_discard_seg`.
        const DISCARD       = 1 << 13;
        /// Device can support write zeroes command, maximum write zeroes sectors
        /// size in `max_write_zeroes_sectors` and maximum write zeroes segment
        /// number in `max_write_zeroes_seg`.
        const WRITE_ZEROES  = 1 << 14;
        /// Device supports providing storage lifetime information.
        const LIFETIME      = 1 << 15;
        /// Device can support the secure erase command.
        const SECURE_ERASE  = 1 << 16;

        // device independent
        /// Device notifies the driver even when the ring is empty.
        const NOTIFY_ON_EMPTY       = 1 << 24; // legacy
        /// Device accepts arbitrary descriptor layouts.
        const ANY_LAYOUT            = 1 << 27; // legacy
        /// Driver may use indirect descriptor tables.
        const RING_INDIRECT_DESC    = 1 << 28;
        /// Used/avail event index fields are enabled (notification suppression).
        const RING_EVENT_IDX        = 1 << 29;
        const UNUSED                = 1 << 30; // legacy
        /// Device complies with VirtIO 1.0 or later, rather than the legacy interface.
        const VERSION_1             = 1 << 32; // detect legacy

        // the following since virtio v1.1
        /// Device can be used on a platform with restricted/translated memory access.
        const ACCESS_PLATFORM       = 1 << 33;
        /// Device supports the packed virtqueue layout.
        const RING_PACKED           = 1 << 34;
        /// Device uses buffers in the order they were made available.
        const IN_ORDER              = 1 << 35;
        /// Device requires platform-specified memory ordering.
        const ORDER_PLATFORM        = 1 << 36;
        /// Device supports Single Root I/O Virtualization.
        const SR_IOV                = 1 << 37;
        /// Driver passes extra data in device notifications.
        const NOTIFICATION_DATA     = 1 << 38;
    }
}
566
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        hal::fake::FakeHal,
        transport::{
            DeviceType,
            fake::{FakeTransport, QueueStatus, State},
        },
    };
    use alloc::{sync::Arc, vec};
    use core::mem::size_of;
    use std::{sync::Mutex, thread};

    #[test]
    fn config() {
        // The 64-bit capacity is split across two 32-bit config fields; check that the driver
        // recombines them correctly and reports the RO feature.
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(0x42),
            capacity_high: ReadOnly::new(0x02),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RO.bits(),
            state: state.clone(),
        };
        let blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        assert_eq!(blk.capacity(), 0x02_0000_0042);
        assert_eq!(blk.readonly(), true);
    }

    #[test]
    fn read() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Start a thread to simulate the device waiting for a read request.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(
                state
                    .lock()
                    .unwrap()
                    .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                        // The driver should have sent an `In` request for sector 42.
                        assert_eq!(
                            request,
                            BlkReq {
                                type_: ReqType::In,
                                reserved: 0,
                                sector: 42
                            }
                            .as_bytes()
                        );

                        // Respond with one sector of data followed by an OK status.
                        let mut response = vec![0; SECTOR_SIZE];
                        response[0..9].copy_from_slice(b"Test data");
                        response.extend_from_slice(
                            BlkResp {
                                status: RespStatus::OK,
                            }
                            .as_bytes(),
                        );

                        response
                    })
            );
        });

        // Read a block from the device.
        let mut buffer = [0; 512];
        blk.read_blocks(42, &mut buffer).unwrap();
        assert_eq!(&buffer[0..9], b"Test data");

        handle.join().unwrap();
    }

    #[test]
    fn write() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Start a thread to simulate the device waiting for a write request.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(
                state
                    .lock()
                    .unwrap()
                    .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                        // The request header should describe an `Out` request for sector 42...
                        assert_eq!(
                            &request[0..size_of::<BlkReq>()],
                            BlkReq {
                                type_: ReqType::Out,
                                reserved: 0,
                                sector: 42
                            }
                            .as_bytes()
                        );
                        // ...followed by one sector of payload.
                        let data = &request[size_of::<BlkReq>()..];
                        assert_eq!(data.len(), SECTOR_SIZE);
                        assert_eq!(&data[0..9], b"Test data");

                        // The device only writes back a status byte for a write request.
                        let mut response = Vec::new();
                        response.extend_from_slice(
                            BlkResp {
                                status: RespStatus::OK,
                            }
                            .as_bytes(),
                        );

                        response
                    })
            );
        });

        // Write a block to the device.
        let mut buffer = [0; 512];
        buffer[0..9].copy_from_slice(b"Test data");
        blk.write_blocks(42, &mut buffer).unwrap();

        // Request to flush should be ignored as the device doesn't support it.
        blk.flush().unwrap();

        handle.join().unwrap();
    }

    #[test]
    fn flush() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            // FLUSH is advertised here, so the driver should actually send the request.
            device_features: (BlkFeature::RING_INDIRECT_DESC | BlkFeature::FLUSH).bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Start a thread to simulate the device waiting for a flush request.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(
                state
                    .lock()
                    .unwrap()
                    .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                        assert_eq!(
                            request,
                            BlkReq {
                                type_: ReqType::Flush,
                                reserved: 0,
                                sector: 0,
                            }
                            .as_bytes()
                        );

                        let mut response = Vec::new();
                        response.extend_from_slice(
                            BlkResp {
                                status: RespStatus::OK,
                            }
                            .as_bytes(),
                        );

                        response
                    })
            );
        });

        // Request to flush.
        blk.flush().unwrap();

        handle.join().unwrap();
    }

    #[test]
    fn device_id() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Start a thread to simulate the device waiting for a device ID request.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(
                state
                    .lock()
                    .unwrap()
                    .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                        assert_eq!(
                            request,
                            BlkReq {
                                type_: ReqType::GetId,
                                reserved: 0,
                                sector: 0,
                            }
                            .as_bytes()
                        );

                        // Return a NUL-padded 20-byte ID followed by an OK status.
                        let mut response = Vec::new();
                        response.extend_from_slice(b"device_id\0\0\0\0\0\0\0\0\0\0\0");
                        response.extend_from_slice(
                            BlkResp {
                                status: RespStatus::OK,
                            }
                            .as_bytes(),
                        );

                        response
                    })
            );
        });

        let mut id = [0; 20];
        let length = blk.device_id(&mut id).unwrap();
        // `device_id` reports the length up to the first NUL byte.
        assert_eq!(&id[0..length], b"device_id");

        handle.join().unwrap();
    }
}
897}