1use crate::config::{ReadOnly, read_config};
4use crate::hal::Hal;
5use crate::queue::VirtQueue;
6use crate::transport::{InterruptStatus, Transport};
7use crate::{Error, Result};
8use bitflags::bitflags;
9use log::info;
10use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};
11
/// Index of the single request virtqueue used by the block device.
const QUEUE: u16 = 0;
/// Number of descriptors in the request virtqueue.
const QUEUE_SIZE: u16 = 16;
/// Feature bits this driver is willing to negotiate with the device.
const SUPPORTED_FEATURES: BlkFeature = BlkFeature::RO
    .union(BlkFeature::FLUSH)
    .union(BlkFeature::RING_INDIRECT_DESC)
    .union(BlkFeature::RING_EVENT_IDX)
    .union(BlkFeature::VERSION_1);
19
/// Driver for a VirtIO block device.
///
/// Reads and writes are performed in units of [`SECTOR_SIZE`] bytes through a
/// single virtqueue shared with the device.
pub struct VirtIOBlk<H: Hal, T: Transport> {
    /// Transport used to communicate with the device (e.g. MMIO or PCI).
    transport: T,
    /// The single request/response virtqueue (queue index `QUEUE`).
    queue: VirtQueue<H, { QUEUE_SIZE as usize }>,
    /// Device capacity in 512-byte sectors, read from config space at init.
    capacity: u64,
    /// Feature bits accepted by both driver and device during negotiation.
    negotiated_features: BlkFeature,
}
52
impl<H: Hal, T: Transport> VirtIOBlk<H, T> {
    /// Creates a new driver for the block device behind `transport`.
    ///
    /// Negotiates features, reads the device capacity from config space, sets
    /// up the request virtqueue and completes device initialisation.
    pub fn new(mut transport: T) -> Result<Self> {
        let negotiated_features = transport.begin_init(SUPPORTED_FEATURES);

        // The 64-bit capacity spans two 32-bit config fields; `read_consistent`
        // retries until both halves are read without an intervening change.
        let capacity = transport.read_consistent(|| {
            Ok((read_config!(transport, BlkConfig, capacity_low)? as u64)
                | ((read_config!(transport, BlkConfig, capacity_high)? as u64) << 32))
        })?;
        // Capacity is counted in 512-byte sectors, so 2 sectors per KiB.
        info!("found a block device of size {}KB", capacity / 2);

        let queue = VirtQueue::new(
            &mut transport,
            QUEUE,
            negotiated_features.contains(BlkFeature::RING_INDIRECT_DESC),
            negotiated_features.contains(BlkFeature::RING_EVENT_IDX),
        )?;
        transport.finish_init();

        Ok(VirtIOBlk {
            transport,
            queue,
            capacity,
            negotiated_features,
        })
    }

    /// Returns the capacity of the device, in units of 512-byte sectors.
    pub fn capacity(&self) -> u64 {
        self.capacity
    }

    /// Returns true if the device negotiated the read-only (`RO`) feature.
    pub fn readonly(&self) -> bool {
        self.negotiated_features.contains(BlkFeature::RO)
    }

    /// Acknowledges a pending interrupt, if any, and returns its status.
    pub fn ack_interrupt(&mut self) -> InterruptStatus {
        self.transport.ack_interrupt()
    }

    /// Enables device-to-driver notifications on the request queue.
    pub fn enable_interrupts(&mut self) {
        self.queue.set_dev_notify(true);
    }

    /// Disables device-to-driver notifications on the request queue.
    pub fn disable_interrupts(&mut self) {
        self.queue.set_dev_notify(false);
    }

    /// Sends a request with no data payload and blocks until it completes,
    /// then converts the device's status byte into a `Result`.
    fn request(&mut self, request: BlkReq) -> Result {
        let mut resp = BlkResp::default();
        self.queue.add_notify_wait_pop(
            &[request.as_bytes()],
            &mut [resp.as_mut_bytes()],
            &mut self.transport,
        )?;
        resp.status.into()
    }

    /// Sends a request whose payload is written by the device into `data`,
    /// blocking until completion.
    fn request_read(&mut self, request: BlkReq, data: &mut [u8]) -> Result {
        let mut resp = BlkResp::default();
        self.queue.add_notify_wait_pop(
            &[request.as_bytes()],
            // Device-writable buffers: the data payload, then the status byte.
            &mut [data, resp.as_mut_bytes()],
            &mut self.transport,
        )?;
        resp.status.into()
    }

    /// Sends a request whose payload `data` is read by the device, blocking
    /// until completion.
    fn request_write(&mut self, request: BlkReq, data: &[u8]) -> Result {
        let mut resp = BlkResp::default();
        self.queue.add_notify_wait_pop(
            // Device-readable buffers: the request header, then the payload.
            &[request.as_bytes(), data],
            &mut [resp.as_mut_bytes()],
            &mut self.transport,
        )?;
        resp.status.into()
    }

    /// Requests the device to flush any pending writes to storage.
    ///
    /// A no-op (returns `Ok`) if the `FLUSH` feature was not negotiated.
    pub fn flush(&mut self) -> Result {
        if self.negotiated_features.contains(BlkFeature::FLUSH) {
            self.request(BlkReq {
                type_: ReqType::Flush,
                ..Default::default()
            })
        } else {
            Ok(())
        }
    }

    /// Reads the device ID string into `id`, returning its length.
    ///
    /// The returned length is the position of the first NUL byte, or 20 if the
    /// ID fills the whole buffer.
    pub fn device_id(&mut self, id: &mut [u8; 20]) -> Result<usize> {
        self.request_read(
            BlkReq {
                type_: ReqType::GetId,
                ..Default::default()
            },
            id,
        )?;

        let length = id.iter().position(|&x| x == 0).unwrap_or(20);
        Ok(length)
    }

    /// Reads one or more contiguous blocks into `buf`, blocking until done.
    ///
    /// `block_id` is the first 512-byte sector to read; `buf` must be a
    /// non-empty multiple of [`SECTOR_SIZE`] bytes long (asserted).
    pub fn read_blocks(&mut self, block_id: usize, buf: &mut [u8]) -> Result {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        self.request_read(
            BlkReq {
                type_: ReqType::In,
                reserved: 0,
                sector: block_id as u64,
            },
            buf,
        )
    }

    /// Submits a read request without waiting for it to complete, returning a
    /// token to identify it later.
    ///
    /// # Safety
    ///
    /// `req`, `buf` and `resp` are handed to the device until the request is
    /// completed via [`Self::complete_read_blocks`] with the returned token;
    /// until then they must stay alive and must not be accessed.
    pub unsafe fn read_blocks_nb(
        &mut self,
        block_id: usize,
        req: &mut BlkReq,
        buf: &mut [u8],
        resp: &mut BlkResp,
    ) -> Result<u16> {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        *req = BlkReq {
            type_: ReqType::In,
            reserved: 0,
            sector: block_id as u64,
        };
        // SAFETY: the caller guarantees the buffers remain valid and untouched
        // until the matching `complete_read_blocks` call (see method docs).
        let token = unsafe {
            self.queue
                .add(&[req.as_bytes()], &mut [buf, resp.as_mut_bytes()])?
        };
        // Only kick the device if it asked to be notified for this queue state.
        if self.queue.should_notify() {
            self.transport.notify(QUEUE);
        }
        Ok(token)
    }

    /// Completes a read started by [`Self::read_blocks_nb`].
    ///
    /// # Safety
    ///
    /// Must be called with the same token, request, buffer and response that
    /// were passed to the corresponding `read_blocks_nb` call, and only once
    /// the device has finished the request (e.g. its token was returned by
    /// [`Self::peek_used`]).
    pub unsafe fn complete_read_blocks(
        &mut self,
        token: u16,
        req: &BlkReq,
        buf: &mut [u8],
        resp: &mut BlkResp,
    ) -> Result<()> {
        // SAFETY: caller guarantees these are the same buffers given to `add`
        // for this token, so the queue can safely reclaim the descriptors.
        unsafe {
            self.queue
                .pop_used(token, &[req.as_bytes()], &mut [buf, resp.as_mut_bytes()])?;
        }
        resp.status.into()
    }

    /// Writes one or more contiguous blocks from `buf`, blocking until done.
    ///
    /// `block_id` is the first 512-byte sector to write; `buf` must be a
    /// non-empty multiple of [`SECTOR_SIZE`] bytes long (asserted).
    pub fn write_blocks(&mut self, block_id: usize, buf: &[u8]) -> Result {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        self.request_write(
            BlkReq {
                type_: ReqType::Out,
                sector: block_id as u64,
                ..Default::default()
            },
            buf,
        )
    }

    /// Submits a write request without waiting for it to complete, returning a
    /// token to identify it later.
    ///
    /// # Safety
    ///
    /// `req`, `buf` and `resp` are handed to the device until the request is
    /// completed via [`Self::complete_write_blocks`] with the returned token;
    /// until then they must stay alive and must not be accessed.
    pub unsafe fn write_blocks_nb(
        &mut self,
        block_id: usize,
        req: &mut BlkReq,
        buf: &[u8],
        resp: &mut BlkResp,
    ) -> Result<u16> {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        *req = BlkReq {
            type_: ReqType::Out,
            reserved: 0,
            sector: block_id as u64,
        };
        // SAFETY: the caller guarantees the buffers remain valid and untouched
        // until the matching `complete_write_blocks` call (see method docs).
        let token = unsafe {
            self.queue
                .add(&[req.as_bytes(), buf], &mut [resp.as_mut_bytes()])?
        };
        // Only kick the device if it asked to be notified for this queue state.
        if self.queue.should_notify() {
            self.transport.notify(QUEUE);
        }
        Ok(token)
    }

    /// Completes a write started by [`Self::write_blocks_nb`].
    ///
    /// # Safety
    ///
    /// Must be called with the same token, request, buffer and response that
    /// were passed to the corresponding `write_blocks_nb` call, and only once
    /// the device has finished the request (e.g. its token was returned by
    /// [`Self::peek_used`]).
    pub unsafe fn complete_write_blocks(
        &mut self,
        token: u16,
        req: &BlkReq,
        buf: &[u8],
        resp: &mut BlkResp,
    ) -> Result<()> {
        // SAFETY: caller guarantees these are the same buffers given to `add`
        // for this token, so the queue can safely reclaim the descriptors.
        unsafe {
            self.queue
                .pop_used(token, &[req.as_bytes(), buf], &mut [resp.as_mut_bytes()])?;
        }
        resp.status.into()
    }

    /// Returns the token of the next completed request, if any, without
    /// removing it from the used ring.
    pub fn peek_used(&mut self) -> Option<u16> {
        self.queue.peek_used()
    }

    /// Returns the size (number of descriptors) of the request virtqueue.
    pub fn virt_queue_size(&self) -> u16 {
        QUEUE_SIZE
    }
}
397
impl<H: Hal, T: Transport> Drop for VirtIOBlk<H, T> {
    fn drop(&mut self) {
        // Tell the device to stop using the queue before its backing memory
        // (owned by `self.queue`) is freed when the driver is dropped.
        self.transport.queue_unset(QUEUE);
    }
}
405
/// Layout of the VirtIO block device's configuration space.
#[derive(FromBytes, Immutable, IntoBytes)]
#[repr(C)]
struct BlkConfig {
    /// Low 32 bits of the device capacity, in 512-byte sectors.
    capacity_low: ReadOnly<u32>,
    /// High 32 bits of the device capacity.
    capacity_high: ReadOnly<u32>,
    /// Maximum size of any single segment — NOTE(review): only meaningful when
    /// the corresponding feature bit is negotiated; unused by this driver.
    size_max: ReadOnly<u32>,
    /// Maximum number of segments in a request; unused by this driver.
    seg_max: ReadOnly<u32>,
    /// Legacy geometry fields; unused by this driver.
    cylinders: ReadOnly<u16>,
    heads: ReadOnly<u8>,
    sectors: ReadOnly<u8>,
    /// Block size of the device; unused by this driver.
    blk_size: ReadOnly<u32>,
    /// Topology fields; unused by this driver.
    physical_block_exp: ReadOnly<u8>,
    alignment_offset: ReadOnly<u8>,
    min_io_size: ReadOnly<u16>,
    opt_io_size: ReadOnly<u32>,
}
424
/// A VirtIO block device request header, sent to the device as the first
/// (device-readable) buffer of each transfer.
#[repr(C)]
#[derive(Debug, Immutable, IntoBytes, KnownLayout)]
pub struct BlkReq {
    /// The kind of request (read, write, flush, get-ID, ...).
    type_: ReqType,
    /// Reserved; this driver always writes 0 here.
    reserved: u32,
    /// The first 512-byte sector the request applies to.
    sector: u64,
}
433
434impl Default for BlkReq {
435 fn default() -> Self {
436 Self {
437 type_: ReqType::In,
438 reserved: 0,
439 sector: 0,
440 }
441 }
442}
443
/// Response the device writes back for a block request.
#[repr(C)]
#[derive(Debug, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct BlkResp {
    /// Status byte written by the device on completion.
    status: RespStatus,
}
450
impl BlkResp {
    /// Returns the status the device reported for the request.
    pub fn status(&self) -> RespStatus {
        self.status
    }
}
457
/// Discriminants for the kinds of block device requests.
#[repr(u32)]
#[derive(Debug, Immutable, IntoBytes, KnownLayout)]
enum ReqType {
    /// Read sectors from the device.
    In = 0,
    /// Write sectors to the device.
    Out = 1,
    /// Flush the device's write cache.
    Flush = 4,
    /// Read the device ID string.
    GetId = 8,
    // The following request types are defined but not issued by this driver.
    GetLifetime = 10,
    Discard = 11,
    WriteZeroes = 13,
    SecureErase = 14,
}
470
/// Status byte returned by the device for a block request.
#[repr(transparent)]
#[derive(Copy, Clone, Debug, Eq, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq)]
pub struct RespStatus(u8);
475
impl RespStatus {
    /// The request completed successfully.
    pub const OK: RespStatus = RespStatus(0);
    /// The request failed with an I/O error.
    pub const IO_ERR: RespStatus = RespStatus(1);
    /// The request is not supported by the device.
    pub const UNSUPPORTED: RespStatus = RespStatus(2);
    /// The device has not yet written a status (initial/default value).
    pub const NOT_READY: RespStatus = RespStatus(3);
}
486
487impl From<RespStatus> for Result {
488 fn from(status: RespStatus) -> Self {
489 match status {
490 RespStatus::OK => Ok(()),
491 RespStatus::IO_ERR => Err(Error::IoError),
492 RespStatus::UNSUPPORTED => Err(Error::Unsupported),
493 RespStatus::NOT_READY => Err(Error::NotReady),
494 _ => Err(Error::IoError),
495 }
496 }
497}
498
499impl Default for BlkResp {
500 fn default() -> Self {
501 BlkResp {
502 status: RespStatus::NOT_READY,
503 }
504 }
505}
506
/// The number of bytes in a block device sector.
pub const SECTOR_SIZE: usize = 512;
510
511bitflags! {
512 #[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
513 struct BlkFeature: u64 {
514 const BARRIER = 1 << 0;
516 const SIZE_MAX = 1 << 1;
518 const SEG_MAX = 1 << 2;
520 const GEOMETRY = 1 << 4;
522 const RO = 1 << 5;
524 const BLK_SIZE = 1 << 6;
526 const SCSI = 1 << 7;
528 const FLUSH = 1 << 9;
530 const TOPOLOGY = 1 << 10;
532 const CONFIG_WCE = 1 << 11;
534 const MQ = 1 << 12;
536 const DISCARD = 1 << 13;
540 const WRITE_ZEROES = 1 << 14;
544 const LIFETIME = 1 << 15;
546 const SECURE_ERASE = 1 << 16;
548
549 const NOTIFY_ON_EMPTY = 1 << 24; const ANY_LAYOUT = 1 << 27; const RING_INDIRECT_DESC = 1 << 28;
553 const RING_EVENT_IDX = 1 << 29;
554 const UNUSED = 1 << 30; const VERSION_1 = 1 << 32; const ACCESS_PLATFORM = 1 << 33;
559 const RING_PACKED = 1 << 34;
560 const IN_ORDER = 1 << 35;
561 const ORDER_PLATFORM = 1 << 36;
562 const SR_IOV = 1 << 37;
563 const NOTIFICATION_DATA = 1 << 38;
564 }
565}
566
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        hal::fake::FakeHal,
        transport::{
            DeviceType,
            fake::{FakeTransport, QueueStatus, State},
        },
    };
    use alloc::{sync::Arc, vec};
    use core::mem::size_of;
    use std::{sync::Mutex, thread};

    /// Builds a `BlkConfig` with the given capacity and every other field
    /// zeroed, so each test doesn't repeat the full 12-field fixture.
    fn config_with_capacity(capacity_low: u32, capacity_high: u32) -> BlkConfig {
        BlkConfig {
            capacity_low: ReadOnly::new(capacity_low),
            capacity_high: ReadOnly::new(capacity_high),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        }
    }

    #[test]
    fn config() {
        let config_space = config_with_capacity(0x42, 0x02);
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RO.bits(),
            state: state.clone(),
        };
        let blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Capacity is assembled from the low and high config-space halves.
        assert_eq!(blk.capacity(), 0x02_0000_0042);
        assert_eq!(blk.readonly(), true);
    }

    #[test]
    fn read() {
        let config_space = config_with_capacity(66, 0);
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Fake device thread: wait for the driver's notification, check the
        // request header, then return one sector of data plus an OK status.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(
                state
                    .lock()
                    .unwrap()
                    .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                        assert_eq!(
                            request,
                            BlkReq {
                                type_: ReqType::In,
                                reserved: 0,
                                sector: 42
                            }
                            .as_bytes()
                        );

                        let mut response = vec![0; SECTOR_SIZE];
                        response[0..9].copy_from_slice(b"Test data");
                        response.extend_from_slice(
                            BlkResp {
                                status: RespStatus::OK,
                            }
                            .as_bytes(),
                        );

                        response
                    })
            );
        });

        let mut buffer = [0; 512];
        blk.read_blocks(42, &mut buffer).unwrap();
        assert_eq!(&buffer[0..9], b"Test data");

        handle.join().unwrap();
    }

    #[test]
    fn write() {
        let config_space = config_with_capacity(66, 0);
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Fake device thread: check the request header and written payload,
        // then return an OK status.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(
                state
                    .lock()
                    .unwrap()
                    .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                        assert_eq!(
                            &request[0..size_of::<BlkReq>()],
                            BlkReq {
                                type_: ReqType::Out,
                                reserved: 0,
                                sector: 42
                            }
                            .as_bytes()
                        );
                        let data = &request[size_of::<BlkReq>()..];
                        assert_eq!(data.len(), SECTOR_SIZE);
                        assert_eq!(&data[0..9], b"Test data");

                        let mut response = Vec::new();
                        response.extend_from_slice(
                            BlkResp {
                                status: RespStatus::OK,
                            }
                            .as_bytes(),
                        );

                        response
                    })
            );
        });

        let mut buffer = [0; 512];
        buffer[0..9].copy_from_slice(b"Test data");
        // `write_blocks` takes `&[u8]`; no mutable borrow needed.
        blk.write_blocks(42, &buffer).unwrap();

        // FLUSH was not negotiated, so this must succeed as a no-op without
        // any device interaction.
        blk.flush().unwrap();

        handle.join().unwrap();
    }

    #[test]
    fn flush() {
        let config_space = config_with_capacity(66, 0);
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: (BlkFeature::RING_INDIRECT_DESC | BlkFeature::FLUSH).bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Fake device thread: with FLUSH negotiated, expect a Flush request
        // and answer with an OK status.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(
                state
                    .lock()
                    .unwrap()
                    .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                        assert_eq!(
                            request,
                            BlkReq {
                                type_: ReqType::Flush,
                                reserved: 0,
                                sector: 0,
                            }
                            .as_bytes()
                        );

                        let mut response = Vec::new();
                        response.extend_from_slice(
                            BlkResp {
                                status: RespStatus::OK,
                            }
                            .as_bytes(),
                        );

                        response
                    })
            );
        });

        blk.flush().unwrap();

        handle.join().unwrap();
    }

    #[test]
    fn device_id() {
        let config_space = config_with_capacity(66, 0);
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Fake device thread: expect a GetId request and answer with a
        // NUL-padded 20-byte ID followed by an OK status.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(
                state
                    .lock()
                    .unwrap()
                    .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                        assert_eq!(
                            request,
                            BlkReq {
                                type_: ReqType::GetId,
                                reserved: 0,
                                sector: 0,
                            }
                            .as_bytes()
                        );

                        let mut response = Vec::new();
                        response.extend_from_slice(b"device_id\0\0\0\0\0\0\0\0\0\0\0");
                        response.extend_from_slice(
                            BlkResp {
                                status: RespStatus::OK,
                            }
                            .as_bytes(),
                        );

                        response
                    })
            );
        });

        let mut id = [0; 20];
        let length = blk.device_id(&mut id).unwrap();
        // Returned length is up to (not including) the first NUL byte.
        assert_eq!(&id[0..length], b"device_id");

        handle.join().unwrap();
    }
}