use alloc::vec::Vec;
use core::ptr::NonNull;
use dma_api::{DeviceDma, DmaDirection, DmaOp};
use log::{debug, info};
use mmio_api::{Mmio, MmioAddr, MmioOp};

use crate::{
    command::{
        self, ControllerInfo, Feature, Identify, IdentifyActiveNamespaceList, IdentifyController,
        IdentifyNamespaceDataStructure,
    },
    err::*,
    queue::{CommandSet, NvmeQueue},
    registers::NvmeReg,
};

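/// NVMe controller driver state: the mapped register BAR, the DMA allocator,
/// the admin queue, and the I/O queue pairs created during initialization.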
pub struct Nvme {
    bar: NonNull<NvmeReg>,
    _mmio: Option<Mmio>,
    dma: DeviceDma,
    admin_queue: NvmeQueue,
    io_queues: Vec<NvmeQueue>,
    num_ns: usize,
    sqes: u32,
    cqes: u32,
}

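/// Driver configuration supplied by the caller.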
#[derive(Debug, Clone, Copy)]
pub struct Config {
    /// Page size used when allocating queue memory.
    pub page_size: usize,
    /// Number of I/O submission/completion queue pairs to create; must be at least 1.
    pub io_queue_pair_count: usize,
}

impl Nvme {
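    /// Creates and initializes an NVMe controller driver.
    ///
    /// Maps `bar_size` bytes of controller registers at `bar_addr` through
    /// `mmio_op`, builds a DMA allocator restricted by `dma_mask`, and then
    /// runs the controller initialization sequence with `config`.
    ///
    /// A minimal usage sketch; `MY_DMA_OP` and `MY_MMIO_OP` are hypothetical
    /// platform implementations of the `DmaOp` and `MmioOp` traits, and the
    /// BAR address/size values are placeholders:
    ///
    /// ```ignore
    /// let nvme = Nvme::new(
    ///     bar0_addr,                // BAR0 base address from PCI enumeration
    ///     bar0_size,                // BAR0 size in bytes
    ///     u64::MAX,                 // no DMA addressing restriction
    ///     &MY_DMA_OP,
    ///     &MY_MMIO_OP,
    ///     Config { page_size: 0x1000, io_queue_pair_count: 1 },
    /// )?;
    /// ```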
    pub fn new(
        bar_addr: impl Into<MmioAddr>,
        bar_size: usize,
        dma_mask: u64,
        dma_op: &'static dyn DmaOp,
        mmio_op: &'static dyn MmioOp,
        config: Config,
    ) -> Result<Self> {
        mmio_api::init(mmio_op);
        let mmio = mmio_api::ioremap(bar_addr.into(), bar_size)?;
        let dma = DeviceDma::new(dma_mask, dma_op);
        Self::new_mmio(mmio, dma, config)
    }

    fn new_mmio(mmio: Mmio, dma: DeviceDma, config: Config) -> Result<Self> {
        let bar = NonNull::new(mmio.as_ptr()).expect("mmio mapping must not be null");
        Self::new_with_bar(bar.cast(), Some(mmio), dma, config)
    }

    fn new_with_bar(
        bar: NonNull<NvmeReg>,
        mmio: Option<Mmio>,
        dma: DeviceDma,
        config: Config,
    ) -> Result<Self> {
        let admin_queue = NvmeQueue::new(0, bar, &dma, config.page_size, 64, 64)?;

        assert!(config.io_queue_pair_count > 0);

        let mut s = Self {
            bar,
            _mmio: mmio,
            dma,
            admin_queue,
            io_queues: Vec::new(),
            num_ns: 0,
            // Defaults; replaced by the controller's reported minimums
            // (`sqes_min`/`cqes_min`) during init.
            sqes: 6,
            cqes: 4,
        };

        let version = s.version();

        info!(
            "NVMe @{bar:?} init begin, version: {}.{}.{}",
            version.0, version.1, version.2
        );

        s.init(config)?;

        Ok(s)
    }

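    /// Returns the DMA address mask this driver's allocator was created with.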
    pub fn dma_mask(&self) -> u64 {
        self.dma.dma_mask()
    }

    fn reset(&mut self) {
        self.reg().reset();
    }

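    /// Resets the controller, programs the admin queue registers, waits until the
    /// controller info can be read, and fetches the Identify Controller data.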
    fn reset_and_setup_controller_info(&mut self) -> Result<ControllerInfo> {
        self.reset();

        self.nvme_configure_admin_queue();

        self.reg().ready_for_read_controller_info();

        self.get_identfy(IdentifyController::new())
    }

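    /// Controller bring-up: identify the controller once to learn its reported
    /// `sqes_min`/`cqes_min`, reset and reconfigure the admin queue and CC with
    /// those values, read the namespace count, create the I/O queues, and wait
    /// for namespace 1 to report valid Identify data.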
    fn init(&mut self, config: Config) -> Result {
        let controller = self.reset_and_setup_controller_info()?;

        debug!("Controller: {:?}", controller);

        self.sqes = controller.sqes_min as _;
        self.cqes = controller.cqes_min as _;

        self.reset();

        self.nvme_configure_admin_queue();

        self.reg().setup_cc(self.sqes, self.cqes);

        let controller = self.get_identfy(IdentifyController::new())?;

        debug!("Controller: {:?}", controller);

        self.num_ns = controller.number_of_namespaces as _;

        self.config_io_queue(config)?;

        debug!("IO queue ok.");

        // Retry Identify for namespace 1 until the controller returns valid data.
        loop {
            let ns = self.get_identfy(IdentifyNamespaceDataStructure::new(1))?;
            if let Some(ns) = ns {
                debug!("Namespace: {:?}", ns);
                break;
            }
        }
        debug!("Namespace ok.");
        Ok(())
    }

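    /// Returns the active namespaces reported by the controller, with each
    /// namespace's LBA size, LBA count, and per-LBA metadata size.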
    pub fn namespace_list(&mut self) -> Result<Vec<Namespace>> {
        let id_list = self.get_identfy(IdentifyActiveNamespaceList::new())?;
        let mut out = Vec::new();

        for id in id_list {
            // IDs from the active namespace list are expected to identify successfully.
            let ns = self
                .get_identfy(IdentifyNamespaceDataStructure::new(id))?
                .unwrap();

            out.push(Namespace {
                id,
                lba_size: ns.lba_size as _,
                lba_count: ns.namespace_size as _,
                metadata_size: ns.metadata_size as _,
            });
        }

        Ok(out)
    }

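    /// Programs the admin submission/completion queue sizes and their DMA bus
    /// addresses into the controller's admin queue registers.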
    fn nvme_configure_admin_queue(&mut self) {
        self.reg().set_admin_submission_and_completion_queue_size(
            self.admin_queue.sq.len(),
            self.admin_queue.cq.len(),
        );

        self.reg()
            .set_admin_submission_queue_base_address(self.admin_queue.sq.bus_addr());

        self.reg()
            .set_admin_completion_queue_base_address(self.admin_queue.cq.bus_addr());
    }

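    /// Requests the desired number of I/O queues via Set Features, then creates
    /// each completion/submission queue pair through admin commands and keeps the
    /// queues for later I/O submission.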
    fn config_io_queue(&mut self, config: Config) -> Result {
        let num = config.io_queue_pair_count;
        let cmd = CommandSet::set_features(Feature::NumberOfQueues {
            nsq: num as u32 - 1,
            ncq: num as u32 - 1,
        });
        self.admin_queue.command_sync(cmd)?;

        for i in 0..num {
            let id = (i + 1) as u32;
            let io_queue = NvmeQueue::new(
                id,
                self.bar,
                &self.dma,
                config.page_size,
                2usize.pow(self.sqes as _),
                2usize.pow(self.cqes as _),
            )?;

            let data = CommandSet::create_io_completion_queue(
                io_queue.qid,
                io_queue.cq.len() as _,
                io_queue.cq.bus_addr(),
                true,
                false,
                0,
            );
            self.admin_queue.command_sync(data)?;

            let data = CommandSet::create_io_submission_queue(
                io_queue.qid,
                io_queue.sq.len() as _,
                io_queue.sq.bus_addr(),
                true,
                0,
                io_queue.qid,
                0,
            );

            self.admin_queue.command_sync(data)?;

            self.io_queues.push(io_queue);
        }

        Ok(())
    }

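    /// Issues an Identify admin command for `want`'s CNS value into a freshly
    /// allocated, page-aligned DMA buffer and parses the returned data.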
    pub fn get_identfy<T: Identify>(&mut self, mut want: T) -> Result<T::Output> {
        let cmd = want.command_set_mut();

        cmd.cdw0 = CommandSet::cdw0_from_opcode(command::Opcode::IDENTIFY);
        cmd.cdw10 = T::CNS;

        let buff = self
            .dma
            .array_zero_with_align::<u8>(0x1000, 0x1000, DmaDirection::FromDevice)?;
        cmd.prp1 = buff.dma_addr().as_u64();

        self.admin_queue.command_sync(*cmd)?;

        let data: Vec<u8> = buff.iter().collect();
        let res = want.parse(&data);
        Ok(res)
    }

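    /// Writes `buff` to namespace `ns` starting at LBA `block_start`, blocking
    /// until the command completes. The data is staged through a DMA bounce
    /// buffer; `buff.len()` must be a multiple of the namespace's LBA size.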
    pub fn block_write_sync(
        &mut self,
        ns: &Namespace,
        block_start: u64,
        buff: &[u8],
    ) -> Result<()> {
        assert!(
            buff.len().is_multiple_of(ns.lba_size),
            "buffer size must be multiple of lba size"
        );

        let mut dma_buff = self.dma.array_zero_with_align::<u8>(
            buff.len(),
            ns.lba_size,
            DmaDirection::ToDevice,
        )?;
        dma_buff.copy_from_slice(buff);

        let blk_num = dma_buff.len() / ns.lba_size;

        let cmd = CommandSet::nvm_cmd_write(
            ns.id,
            dma_buff.dma_addr().as_u64(),
            block_start,
            blk_num as _,
        );

        self.io_queues[0].command_sync(cmd)?;

        Ok(())
    }

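    /// Reads whole LBAs from namespace `ns` starting at LBA `block_start` into
    /// `buff`, blocking until the command completes. The data is staged through a
    /// DMA bounce buffer; `buff.len()` must be a multiple of the namespace's LBA size.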
    pub fn block_read_sync(
        &mut self,
        ns: &Namespace,
        block_start: u64,
        buff: &mut [u8],
    ) -> Result<()> {
        assert!(
            buff.len().is_multiple_of(ns.lba_size),
            "buffer size must be multiple of lba size"
        );

        let dma_buff = self.dma.array_zero_with_align::<u8>(
            buff.len(),
            ns.lba_size,
            DmaDirection::FromDevice,
        )?;

        let blk_num = dma_buff.len() / ns.lba_size;

        let cmd = CommandSet::nvm_cmd_read(
            ns.id,
            dma_buff.dma_addr().as_u64(),
            block_start,
            blk_num as _,
        );

        self.io_queues[0].command_sync(cmd)?;

        // Copy the completed read back from the DMA buffer into the caller's buffer.
        for (index, value) in dma_buff.iter().enumerate() {
            buff[index] = value;
        }
        Ok(())
    }

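    /// Returns the controller's reported NVMe version as a (major, minor, tertiary)
    /// triple read from the controller registers.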
    pub fn version(&self) -> (usize, usize, usize) {
        self.reg().version()
    }

    fn reg(&self) -> &NvmeReg {
        unsafe { self.bar.as_ref() }
    }
}

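// SAFETY: `Nvme` owns its MMIO mapping and DMA allocations, and the register
// pointer is only dereferenced through `&self`/`&mut self`, so moving the
// driver to another thread is assumed to be sound.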
unsafe impl Send for Nvme {}

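/// Geometry of one active namespace, as reported by Identify Namespace.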
#[derive(Debug, Clone, Copy)]
pub struct Namespace {
    pub id: u32,
    pub lba_size: usize,
    pub lba_count: usize,
    pub metadata_size: usize,
}