1use crate::config::{read_config, ReadOnly};
4use crate::hal::Hal;
5use crate::queue::VirtQueue;
6use crate::transport::Transport;
7use crate::{Error, Result};
8use bitflags::bitflags;
9use log::info;
10use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};
11
/// Index of the single request virtqueue used by the block device.
const QUEUE: u16 = 0;
/// Number of descriptors in the request virtqueue.
const QUEUE_SIZE: u16 = 16;
/// The set of features this driver will accept if the device offers them.
const SUPPORTED_FEATURES: BlkFeature = BlkFeature::RO
    .union(BlkFeature::FLUSH)
    .union(BlkFeature::RING_INDIRECT_DESC)
    .union(BlkFeature::RING_EVENT_IDX);
18
/// Driver for a VirtIO block device.
pub struct VirtIOBlk<H: Hal, T: Transport> {
    // The underlying transport (e.g. MMIO or PCI) used to talk to the device.
    transport: T,
    // The single request virtqueue shared with the device.
    queue: VirtQueue<H, { QUEUE_SIZE as usize }>,
    // Device capacity in 512-byte sectors, read from the config space.
    capacity: u64,
    // Features agreed with the device during initialisation.
    negotiated_features: BlkFeature,
}
51
impl<H: Hal, T: Transport> VirtIOBlk<H, T> {
    /// Creates a new VirtIO block device driver on the given transport.
    ///
    /// Negotiates features with the device, reads its capacity from the
    /// config space, and sets up the request virtqueue.
    pub fn new(mut transport: T) -> Result<Self> {
        let negotiated_features = transport.begin_init(SUPPORTED_FEATURES);

        // The 64-bit capacity is split across two 32-bit config fields; use a
        // consistent read so the device can't change one half between reads.
        let capacity = transport.read_consistent(|| {
            Ok((read_config!(transport, BlkConfig, capacity_low)? as u64)
                | ((read_config!(transport, BlkConfig, capacity_high)? as u64) << 32))
        })?;
        // Capacity is counted in 512-byte sectors, i.e. 2 sectors per KiB.
        info!("found a block device of size {}KB", capacity / 2);

        let queue = VirtQueue::new(
            &mut transport,
            QUEUE,
            negotiated_features.contains(BlkFeature::RING_INDIRECT_DESC),
            negotiated_features.contains(BlkFeature::RING_EVENT_IDX),
        )?;
        transport.finish_init();

        Ok(VirtIOBlk {
            transport,
            queue,
            capacity,
            negotiated_features,
        })
    }

    /// Returns the capacity of the device, in 512-byte sectors.
    pub fn capacity(&self) -> u64 {
        self.capacity
    }

    /// Returns true if the device negotiated the `RO` (read-only) feature.
    pub fn readonly(&self) -> bool {
        self.negotiated_features.contains(BlkFeature::RO)
    }

    /// Acknowledges a pending interrupt, if any.
    ///
    /// Returns true if there was an interrupt to acknowledge.
    pub fn ack_interrupt(&mut self) -> bool {
        self.transport.ack_interrupt()
    }

    /// Asks the device to send used-buffer notifications for this queue.
    pub fn enable_interrupts(&mut self) {
        self.queue.set_dev_notify(true);
    }

    /// Asks the device to suppress used-buffer notifications for this queue.
    pub fn disable_interrupts(&mut self) {
        self.queue.set_dev_notify(false);
    }

    /// Sends a request with no data payload and blocks until the device
    /// responds, converting the returned status byte into a `Result`.
    fn request(&mut self, request: BlkReq) -> Result {
        let mut resp = BlkResp::default();
        self.queue.add_notify_wait_pop(
            &[request.as_bytes()],
            &mut [resp.as_mut_bytes()],
            &mut self.transport,
        )?;
        resp.status.into()
    }

    /// Sends a request whose payload is written by the device into `data`,
    /// blocking until the device responds.
    fn request_read(&mut self, request: BlkReq, data: &mut [u8]) -> Result {
        let mut resp = BlkResp::default();
        self.queue.add_notify_wait_pop(
            &[request.as_bytes()],
            &mut [data, resp.as_mut_bytes()],
            &mut self.transport,
        )?;
        resp.status.into()
    }

    /// Sends a request whose payload `data` is read by the device, blocking
    /// until the device responds.
    fn request_write(&mut self, request: BlkReq, data: &[u8]) -> Result {
        let mut resp = BlkResp::default();
        self.queue.add_notify_wait_pop(
            &[request.as_bytes(), data],
            &mut [resp.as_mut_bytes()],
            &mut self.transport,
        )?;
        resp.status.into()
    }

    /// Flushes the device's write cache, if the `FLUSH` feature was
    /// negotiated; otherwise does nothing and returns success.
    pub fn flush(&mut self) -> Result {
        if self.negotiated_features.contains(BlkFeature::FLUSH) {
            self.request(BlkReq {
                type_: ReqType::Flush,
                ..Default::default()
            })
        } else {
            Ok(())
        }
    }

    /// Reads the device ID string into `id`, returning its length (the
    /// position of the first NUL byte, or 20 if there is none).
    pub fn device_id(&mut self, id: &mut [u8; 20]) -> Result<usize> {
        self.request_read(
            BlkReq {
                type_: ReqType::GetId,
                ..Default::default()
            },
            id,
        )?;

        let length = id.iter().position(|&x| x == 0).unwrap_or(20);
        Ok(length)
    }

    /// Reads one or more blocks into `buf`, blocking until the device
    /// completes the request.
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or its length is not a multiple of
    /// [`SECTOR_SIZE`].
    pub fn read_blocks(&mut self, block_id: usize, buf: &mut [u8]) -> Result {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        self.request_read(
            BlkReq {
                type_: ReqType::In,
                reserved: 0,
                sector: block_id as u64,
            },
            buf,
        )
    }

    /// Submits a read request without waiting for it to complete, returning
    /// the queue token that identifies it.
    ///
    /// # Safety
    ///
    /// `req`, `buf` and `resp` are shared with the device until the request
    /// completes: the caller must keep them alive and otherwise untouched
    /// until it passes the same token and buffers to
    /// [`Self::complete_read_blocks`].
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or its length is not a multiple of
    /// [`SECTOR_SIZE`].
    pub unsafe fn read_blocks_nb(
        &mut self,
        block_id: usize,
        req: &mut BlkReq,
        buf: &mut [u8],
        resp: &mut BlkResp,
    ) -> Result<u16> {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        *req = BlkReq {
            type_: ReqType::In,
            reserved: 0,
            sector: block_id as u64,
        };
        let token = self
            .queue
            .add(&[req.as_bytes()], &mut [buf, resp.as_mut_bytes()])?;
        // Only notify the device if it hasn't suppressed notifications.
        if self.queue.should_notify() {
            self.transport.notify(QUEUE);
        }
        Ok(token)
    }

    /// Completes a read started with [`Self::read_blocks_nb`], converting the
    /// status written by the device into a `Result`.
    ///
    /// # Safety
    ///
    /// `token`, `req`, `buf` and `resp` must be exactly the ones used in the
    /// corresponding `read_blocks_nb` call.
    pub unsafe fn complete_read_blocks(
        &mut self,
        token: u16,
        req: &BlkReq,
        buf: &mut [u8],
        resp: &mut BlkResp,
    ) -> Result<()> {
        self.queue
            .pop_used(token, &[req.as_bytes()], &mut [buf, resp.as_mut_bytes()])?;
        resp.status.into()
    }

    /// Writes one or more blocks from `buf`, blocking until the device
    /// completes the request.
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or its length is not a multiple of
    /// [`SECTOR_SIZE`].
    pub fn write_blocks(&mut self, block_id: usize, buf: &[u8]) -> Result {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        self.request_write(
            BlkReq {
                type_: ReqType::Out,
                sector: block_id as u64,
                ..Default::default()
            },
            buf,
        )
    }

    /// Submits a write request without waiting for it to complete, returning
    /// the queue token that identifies it.
    ///
    /// # Safety
    ///
    /// `req`, `buf` and `resp` are shared with the device until the request
    /// completes: the caller must keep them alive and otherwise untouched
    /// until it passes the same token and buffers to
    /// [`Self::complete_write_blocks`].
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or its length is not a multiple of
    /// [`SECTOR_SIZE`].
    pub unsafe fn write_blocks_nb(
        &mut self,
        block_id: usize,
        req: &mut BlkReq,
        buf: &[u8],
        resp: &mut BlkResp,
    ) -> Result<u16> {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        *req = BlkReq {
            type_: ReqType::Out,
            reserved: 0,
            sector: block_id as u64,
        };
        let token = self
            .queue
            .add(&[req.as_bytes(), buf], &mut [resp.as_mut_bytes()])?;
        // Only notify the device if it hasn't suppressed notifications.
        if self.queue.should_notify() {
            self.transport.notify(QUEUE);
        }
        Ok(token)
    }

    /// Completes a write started with [`Self::write_blocks_nb`], converting
    /// the status written by the device into a `Result`.
    ///
    /// # Safety
    ///
    /// `token`, `req`, `buf` and `resp` must be exactly the ones used in the
    /// corresponding `write_blocks_nb` call.
    pub unsafe fn complete_write_blocks(
        &mut self,
        token: u16,
        req: &BlkReq,
        buf: &[u8],
        resp: &mut BlkResp,
    ) -> Result<()> {
        self.queue
            .pop_used(token, &[req.as_bytes(), buf], &mut [resp.as_mut_bytes()])?;
        resp.status.into()
    }

    /// Returns the token of the next completed request, if any, without
    /// removing it from the used ring.
    pub fn peek_used(&mut self) -> Option<u16> {
        self.queue.peek_used()
    }

    /// Returns the (fixed) size of the request virtqueue.
    pub fn virt_queue_size(&self) -> u16 {
        QUEUE_SIZE
    }
}
382
impl<H: Hal, T: Transport> Drop for VirtIOBlk<H, T> {
    fn drop(&mut self) {
        // Unset the queue on the device so it stops referring to the queue
        // memory, which is about to be freed along with `self.queue`.
        self.transport.queue_unset(QUEUE);
    }
}
390
/// The block device's configuration space layout.
#[derive(FromBytes, Immutable, IntoBytes)]
#[repr(C)]
struct BlkConfig {
    /// Low 32 bits of the device capacity, in 512-byte sectors.
    capacity_low: ReadOnly<u32>,
    /// High 32 bits of the device capacity, in 512-byte sectors.
    capacity_high: ReadOnly<u32>,
    size_max: ReadOnly<u32>,
    seg_max: ReadOnly<u32>,
    cylinders: ReadOnly<u16>,
    heads: ReadOnly<u8>,
    sectors: ReadOnly<u8>,
    blk_size: ReadOnly<u32>,
    physical_block_exp: ReadOnly<u8>,
    alignment_offset: ReadOnly<u8>,
    min_io_size: ReadOnly<u16>,
    opt_io_size: ReadOnly<u32>,
}
409
/// The request header sent to the device at the start of every block request.
#[repr(C)]
#[derive(Debug, Immutable, IntoBytes, KnownLayout)]
pub struct BlkReq {
    /// The kind of request (read, write, flush, get ID, ...).
    type_: ReqType,
    /// Reserved; always written as 0 by this driver.
    reserved: u32,
    /// The first sector the request applies to.
    sector: u64,
}
418
419impl Default for BlkReq {
420 fn default() -> Self {
421 Self {
422 type_: ReqType::In,
423 reserved: 0,
424 sector: 0,
425 }
426 }
427}
428
/// The response footer written by the device at the end of every request.
#[repr(C)]
#[derive(Debug, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct BlkResp {
    // Status byte written by the device once the request completes.
    status: RespStatus,
}
435
impl BlkResp {
    /// Returns the status written by the device into this response.
    pub fn status(&self) -> RespStatus {
        self.status
    }
}
442
/// The type of a block device request, as sent in the `BlkReq` header.
#[repr(u32)]
#[derive(Debug, Immutable, IntoBytes, KnownLayout)]
enum ReqType {
    /// Read sectors from the device.
    In = 0,
    /// Write sectors to the device.
    Out = 1,
    /// Flush the device's write cache.
    Flush = 4,
    /// Get the device ID string.
    GetId = 8,
    /// Get storage lifetime information.
    GetLifetime = 10,
    /// Discard sectors.
    Discard = 11,
    /// Write zeroes to sectors.
    WriteZeroes = 13,
    /// Securely erase sectors.
    SecureErase = 14,
}
455
/// The status byte a block device writes at the end of each request.
#[repr(transparent)]
#[derive(Copy, Clone, Debug, Eq, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq)]
pub struct RespStatus(u8);
460
impl RespStatus {
    /// The request completed successfully.
    pub const OK: RespStatus = RespStatus(0);
    /// The request failed with an I/O error.
    pub const IO_ERR: RespStatus = RespStatus(1);
    /// The request is not supported by the device.
    pub const UNSUPPORTED: RespStatus = RespStatus(2);
    /// The value the driver pre-fills before the device writes a real status
    /// (see `impl Default for BlkResp`).
    pub const NOT_READY: RespStatus = RespStatus(3);
}
471
472impl From<RespStatus> for Result {
473 fn from(status: RespStatus) -> Self {
474 match status {
475 RespStatus::OK => Ok(()),
476 RespStatus::IO_ERR => Err(Error::IoError),
477 RespStatus::UNSUPPORTED => Err(Error::Unsupported),
478 RespStatus::NOT_READY => Err(Error::NotReady),
479 _ => Err(Error::IoError),
480 }
481 }
482}
483
484impl Default for BlkResp {
485 fn default() -> Self {
486 BlkResp {
487 status: RespStatus::NOT_READY,
488 }
489 }
490}
491
/// The size in bytes of one sector of a VirtIO block device.
pub const SECTOR_SIZE: usize = 512;
495
496bitflags! {
497 #[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
498 struct BlkFeature: u64 {
499 const BARRIER = 1 << 0;
501 const SIZE_MAX = 1 << 1;
503 const SEG_MAX = 1 << 2;
505 const GEOMETRY = 1 << 4;
507 const RO = 1 << 5;
509 const BLK_SIZE = 1 << 6;
511 const SCSI = 1 << 7;
513 const FLUSH = 1 << 9;
515 const TOPOLOGY = 1 << 10;
517 const CONFIG_WCE = 1 << 11;
519 const MQ = 1 << 12;
521 const DISCARD = 1 << 13;
525 const WRITE_ZEROES = 1 << 14;
529 const LIFETIME = 1 << 15;
531 const SECURE_ERASE = 1 << 16;
533
534 const NOTIFY_ON_EMPTY = 1 << 24; const ANY_LAYOUT = 1 << 27; const RING_INDIRECT_DESC = 1 << 28;
538 const RING_EVENT_IDX = 1 << 29;
539 const UNUSED = 1 << 30; const VERSION_1 = 1 << 32; const ACCESS_PLATFORM = 1 << 33;
544 const RING_PACKED = 1 << 34;
545 const IN_ORDER = 1 << 35;
546 const ORDER_PLATFORM = 1 << 36;
547 const SR_IOV = 1 << 37;
548 const NOTIFICATION_DATA = 1 << 38;
549 }
550}
551
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        hal::fake::FakeHal,
        transport::{
            fake::{FakeTransport, QueueStatus, State},
            DeviceType,
        },
    };
    use alloc::{sync::Arc, vec};
    use core::mem::size_of;
    use std::{sync::Mutex, thread};

    /// Checks that the 64-bit capacity is assembled from the two 32-bit
    /// config fields, and that `readonly()` reflects the negotiated RO bit.
    #[test]
    fn config() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(0x42),
            capacity_high: ReadOnly::new(0x02),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RO.bits(),
            state: state.clone(),
        };
        let blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        assert_eq!(blk.capacity(), 0x02_0000_0042);
        assert_eq!(blk.readonly(), true);
    }

    /// Drives a blocking `read_blocks` against a fake device running on a
    /// background thread, and checks the returned sector contents.
    #[test]
    fn read() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Background thread plays the device: waits for the queue
        // notification, checks the request header, and writes back one sector
        // of data followed by an OK status.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(state
                .lock()
                .unwrap()
                .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                    assert_eq!(
                        request,
                        BlkReq {
                            type_: ReqType::In,
                            reserved: 0,
                            sector: 42
                        }
                        .as_bytes()
                    );

                    let mut response = vec![0; SECTOR_SIZE];
                    response[0..9].copy_from_slice(b"Test data");
                    response.extend_from_slice(
                        BlkResp {
                            status: RespStatus::OK,
                        }
                        .as_bytes(),
                    );

                    response
                }));
        });

        let mut buffer = [0; 512];
        blk.read_blocks(42, &mut buffer).unwrap();
        assert_eq!(&buffer[0..9], b"Test data");

        handle.join().unwrap();
    }

    /// Drives a blocking `write_blocks` against a fake device and checks the
    /// device saw the header and the sector payload.
    #[test]
    fn write() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Background thread plays the device: the driver-readable part of the
        // request is the header followed by one sector of data; respond with
        // just an OK status.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(state
                .lock()
                .unwrap()
                .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                    assert_eq!(
                        &request[0..size_of::<BlkReq>()],
                        BlkReq {
                            type_: ReqType::Out,
                            reserved: 0,
                            sector: 42
                        }
                        .as_bytes()
                    );
                    let data = &request[size_of::<BlkReq>()..];
                    assert_eq!(data.len(), SECTOR_SIZE);
                    assert_eq!(&data[0..9], b"Test data");

                    let mut response = Vec::new();
                    response.extend_from_slice(
                        BlkResp {
                            status: RespStatus::OK,
                        }
                        .as_bytes(),
                    );

                    response
                }));
        });

        let mut buffer = [0; 512];
        buffer[0..9].copy_from_slice(b"Test data");
        blk.write_blocks(42, &mut buffer).unwrap();

        // FLUSH was not negotiated here, so this is a no-op and doesn't send
        // a second request to the (already finished) fake device.
        blk.flush().unwrap();

        handle.join().unwrap();
    }

    /// With FLUSH negotiated, `flush()` must send a Flush request for
    /// sector 0.
    #[test]
    fn flush() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: (BlkFeature::RING_INDIRECT_DESC | BlkFeature::FLUSH).bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Background thread plays the device: expects a Flush header and
        // responds with an OK status only.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(state
                .lock()
                .unwrap()
                .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                    assert_eq!(
                        request,
                        BlkReq {
                            type_: ReqType::Flush,
                            reserved: 0,
                            sector: 0,
                        }
                        .as_bytes()
                    );

                    let mut response = Vec::new();
                    response.extend_from_slice(
                        BlkResp {
                            status: RespStatus::OK,
                        }
                        .as_bytes(),
                    );

                    response
                }));
        });

        blk.flush().unwrap();

        handle.join().unwrap();
    }

    /// `device_id` should return the NUL-terminated prefix of the 20-byte ID
    /// buffer the device fills in.
    #[test]
    fn device_id() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Background thread plays the device: expects a GetId header and
        // responds with a 20-byte NUL-padded ID plus an OK status.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(state
                .lock()
                .unwrap()
                .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                    assert_eq!(
                        request,
                        BlkReq {
                            type_: ReqType::GetId,
                            reserved: 0,
                            sector: 0,
                        }
                        .as_bytes()
                    );

                    let mut response = Vec::new();
                    response.extend_from_slice(b"device_id\0\0\0\0\0\0\0\0\0\0\0");
                    response.extend_from_slice(
                        BlkResp {
                            status: RespStatus::OK,
                        }
                        .as_bytes(),
                    );

                    response
                }));
        });

        let mut id = [0; 20];
        let length = blk.device_id(&mut id).unwrap();
        assert_eq!(&id[0..length], b"device_id");

        handle.join().unwrap();
    }
}