1use core::any::Any;
2
3use alloc::{boxed::Box, collections::BTreeSet, sync::Arc};
4use rd_block::{
5 BlkError, Block as RdBlock, BuffConfig, DriverGeneric, Event, IQueue, IdList, Interface,
6 Request, RequestId, RequestKind,
7};
8use spin::Mutex;
9
10use crate::{Namespace, Nvme, err::Result};
11
/// Mutable driver state shared (behind a `spin::Mutex`) between the driver
/// handle and every queue it creates.
struct NvmeBlockInner {
    // Underlying NVMe controller; all synchronous block I/O goes through it.
    nvme: Nvme,
    // Namespace this block device exposes; copied out by value by accessors.
    namespace: Namespace,
    // Software IRQ gate: when false, `handle_irq` reports no events and
    // `submit_request` does not record pending queue ids.
    irq_enabled: bool,
    // Queue ids with completions since the last `handle_irq` drain.
    pending_irq: IdList,
    // Request ids completed but not yet consumed via `poll_request`.
    completed: BTreeSet<RequestId>,
    // Monotonic request-id counter (initialized to 1).
    next_request_id: usize,
    // Monotonic queue-id counter (initialized to 0).
    next_queue_id: usize,
}
21
/// Block-device driver adapter exposing one NVMe namespace through the
/// `rd_block` `DriverGeneric`/`Interface` traits.
pub struct NvmeBlockDriver {
    // Static driver name reported by `DriverGeneric::name`.
    name: &'static str,
    // State shared with every `NvmeRequestQueue` created by this driver.
    inner: Arc<Mutex<NvmeBlockInner>>,
}
26
27impl NvmeBlockDriver {
28 pub fn from_nvme(mut nvme: Nvme) -> Result<Self> {
29 let namespace = nvme
30 .namespace_list()?
31 .into_iter()
32 .next()
33 .ok_or(crate::err::Error::Unknown("no active namespace found"))?;
34
35 Ok(Self::with_namespace("nvme", nvme, namespace))
36 }
37
38 pub fn with_namespace(name: &'static str, nvme: Nvme, namespace: Namespace) -> Self {
39 Self {
40 name,
41 inner: Arc::new(Mutex::new(NvmeBlockInner {
42 nvme,
43 namespace,
44 irq_enabled: true,
45 pending_irq: IdList::none(),
46 completed: BTreeSet::new(),
47 next_request_id: 1,
48 next_queue_id: 0,
49 })),
50 }
51 }
52
53 pub fn namespace(&self) -> Namespace {
54 self.inner.lock().namespace
55 }
56
57 pub fn into_block(self, dma_op: &'static dyn dma_api::DmaOp) -> RdBlock {
58 RdBlock::new(self, dma_op)
59 }
60}
61
impl DriverGeneric for NvmeBlockDriver {
    /// Static name supplied at construction (`"nvme"` via `from_nvme`).
    fn name(&self) -> &str {
        self.name
    }

    /// Exposes `self` for downcasting by generic driver consumers.
    fn raw_any(&self) -> Option<&dyn Any> {
        Some(self)
    }

    /// Mutable counterpart of [`Self::raw_any`].
    fn raw_any_mut(&mut self) -> Option<&mut dyn Any> {
        Some(self)
    }
}
75
76impl Interface for NvmeBlockDriver {
77 fn create_queue(&mut self) -> Option<Box<dyn IQueue>> {
78 let mut inner = self.inner.lock();
79 let queue_id = inner.next_queue_id;
80 inner.next_queue_id += 1;
81
82 Some(Box::new(NvmeRequestQueue {
83 id: queue_id,
84 inner: self.inner.clone(),
85 }))
86 }
87
88 fn enable_irq(&mut self) {
89 self.inner.lock().irq_enabled = true;
90 }
91
92 fn disable_irq(&mut self) {
93 self.inner.lock().irq_enabled = false;
94 }
95
96 fn is_irq_enabled(&self) -> bool {
97 self.inner.lock().irq_enabled
98 }
99
100 fn handle_irq(&mut self) -> Event {
101 let mut inner = self.inner.lock();
102 if !inner.irq_enabled {
103 return Event::none();
104 }
105
106 let mut event = Event::none();
107 core::mem::swap(&mut event.queue, &mut inner.pending_irq);
108 event
109 }
110}
111
/// Per-queue handle returned by `Interface::create_queue`. All queues share
/// the same driver state behind `inner`; `id` distinguishes them in IRQ
/// event reporting.
struct NvmeRequestQueue {
    id: usize,
    inner: Arc<Mutex<NvmeBlockInner>>,
}
116
impl IQueue for NvmeRequestQueue {
    /// Queue id as allocated by `create_queue`.
    fn id(&self) -> usize {
        self.id
    }

    /// Total number of addressable blocks (LBAs) in the namespace.
    fn num_blocks(&self) -> usize {
        self.inner.lock().namespace.lba_count
    }

    /// Size in bytes of a single block (LBA).
    fn block_size(&self) -> usize {
        self.inner.lock().namespace.lba_size
    }

    /// Buffer requirements: DMA mask from the controller, size and alignment
    /// of exactly one LBA.
    fn buff_config(&self) -> BuffConfig {
        let inner = self.inner.lock();
        BuffConfig {
            dma_mask: inner.nvme.dma_mask(),
            align: inner.namespace.lba_size,
            size: inner.namespace.lba_size,
        }
    }

    /// Executes a single-block read or write.
    ///
    /// Despite the queue-style interface, the I/O runs synchronously
    /// (`block_read_sync` / `block_write_sync`) while holding the driver
    /// lock: the request id is recorded as completed before this function
    /// returns, so a subsequent `poll_request` for it succeeds immediately.
    ///
    /// NOTE(review): the read path accepts any buffer with
    /// `size >= lba_size` (only the first LBA worth is filled), while the
    /// write path demands an exact-length slice — confirm this asymmetry is
    /// intended by the `rd_block` contract.
    fn submit_request(
        &mut self,
        request: Request<'_>,
    ) -> core::result::Result<RequestId, BlkError> {
        let mut inner = self.inner.lock();
        // Copy the namespace out so `inner` can still be borrowed mutably.
        let namespace = inner.namespace;

        if request.block_id >= namespace.lba_count {
            return Err(BlkError::InvalidBlockIndex(request.block_id));
        }

        // Allocate a fresh request id; the counter starts at 1 and only
        // grows, so ids are never reused.
        let req_id = RequestId::new(inner.next_request_id);
        inner.next_request_id += 1;

        match request.kind {
            RequestKind::Read(buffer) => {
                if buffer.size < namespace.lba_size {
                    return Err(BlkError::NotSupported);
                }

                // SAFETY: `buffer.virt` is presumed to point at a writable
                // region of at least `buffer.size` bytes (per the rd_block
                // buffer contract — TODO confirm), and the check above
                // guarantees `buffer.size >= lba_size`, so the first
                // `lba_size` bytes are in bounds for the duration of the
                // synchronous read.
                let slice =
                    unsafe { core::slice::from_raw_parts_mut(buffer.virt, namespace.lba_size) };
                inner
                    .nvme
                    .block_read_sync(&namespace, request.block_id as u64, slice)
                    .map_err(|err| BlkError::Other(Box::new(err)))?;
            }
            RequestKind::Write(buffer) => {
                if buffer.len() != namespace.lba_size {
                    return Err(BlkError::NotSupported);
                }

                inner
                    .nvme
                    .block_write_sync(&namespace, request.block_id as u64, buffer)
                    .map_err(|err| BlkError::Other(Box::new(err)))?;
            }
        }

        // Record completion immediately (synchronous model) and flag this
        // queue for the next IRQ drain when IRQ reporting is enabled.
        inner.completed.insert(req_id);
        if inner.irq_enabled {
            inner.pending_irq.insert(self.id);
        }

        Ok(req_id)
    }

    /// Returns `Ok(())` exactly once per completed request (the completion
    /// record is consumed on success); otherwise `BlkError::Retry`.
    fn poll_request(&mut self, request: RequestId) -> core::result::Result<(), BlkError> {
        let mut inner = self.inner.lock();
        if inner.completed.remove(&request) {
            Ok(())
        } else {
            Err(BlkError::Retry)
        }
    }
}