use core::ptr::NonNull;

use alloc::vec::Vec;
use dma_api::{DSlice, DSliceMut, DVec, Direction};
use log::{debug, info};

use crate::{
    command::{
        self, ControllerInfo, Feature, Identify, IdentifyActiveNamespaceList, IdentifyController,
        IdentifyNamespaceDataStructure,
    },
    err::*,
    queue::{CommandSet, NvmeQueue},
    registers::NvmeReg,
};

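/// NVMe controller handle: the memory-mapped register BAR, the admin queue,
/// and the I/O queue pairs created during initialization.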
pub struct Nvme {
    bar: NonNull<NvmeReg>,
    admin_queue: NvmeQueue,
    io_queues: Vec<NvmeQueue>,
    num_ns: usize,
    sqes: u32,
    cqes: u32,
}

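/// Controller configuration: the host page size and how many I/O
/// submission/completion queue pairs to create.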
#[derive(Debug, Clone, Copy)]
pub struct Config {
    pub page_size: usize,
    pub io_queue_pair_count: usize,
}

impl Nvme {
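    /// Creates and initializes a controller from its memory-mapped register
    /// BAR. A minimal usage sketch, where `bar_ptr` is a hypothetical
    /// `NonNull<u8>` pointing at the controller's mapped BAR0:
    ///
    /// ```ignore
    /// let mut nvme = Nvme::new(
    ///     bar_ptr,
    ///     Config {
    ///         page_size: 0x1000,
    ///         io_queue_pair_count: 1,
    ///     },
    /// )?;
    /// let namespaces = nvme.namespace_list()?;
    /// ```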
    pub fn new(bar: NonNull<u8>, config: Config) -> Result<Self> {
        assert!(config.io_queue_pair_count > 0);

        let admin_queue = NvmeQueue::new(0, bar.cast(), config.page_size, 64, 64)?;

        let mut s = Self {
            bar: bar.cast(),
            admin_queue,
            io_queues: Vec::new(),
            num_ns: 0,
            // log2 of the SQ/CQ entry sizes (64-byte and 16-byte entries,
            // the NVMe defaults) until the controller reports its own values.
            sqes: 6,
            cqes: 4,
        };

        let version = s.version();

        info!(
            "NVME @{bar:?} init begin, version: {}.{}.{}",
            version.0, version.1, version.2
        );

        s.init(config)?;

        Ok(s)
    }

    fn reset(&mut self) {
        self.reg().reset();
    }

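    /// Resets the controller, brings the admin queue up, and reads the
    /// Identify Controller data.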
    fn reset_and_setup_controller_info(&mut self) -> Result<ControllerInfo> {
        self.reset();

        self.nvme_configure_admin_queue();

        self.reg().ready_for_read_controller_info();

        self.get_identify(IdentifyController::new())
    }

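    /// Two-pass bring-up: identify the controller once with the default entry
    /// sizes, then reset and reconfigure CC with the entry sizes the
    /// controller reports before creating the I/O queues.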
    fn init(&mut self, config: Config) -> Result {
        let controller = self.reset_and_setup_controller_info()?;

        debug!("Controller: {:?}", controller);

        self.sqes = controller.sqes_min as _;
        self.cqes = controller.cqes_min as _;

        self.reset();

        self.nvme_configure_admin_queue();

        self.reg().setup_cc(self.sqes, self.cqes);

        let controller = self.get_identify(IdentifyController::new())?;

        debug!("Controller: {:?}", controller);

        self.num_ns = controller.number_of_namespaces as _;

        self.config_io_queue(config)?;

        debug!("IO queue ok.");
        loop {
            if let Some(ns) = self.get_identify(IdentifyNamespaceDataStructure::new(1))? {
                debug!("Namespace: {:?}", ns);
                break;
            }
        }
        debug!("Namespace ok.");
        Ok(())
    }

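    /// Enumerates the active namespaces and returns their geometry.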
    pub fn namespace_list(&mut self) -> Result<Vec<Namespace>> {
        let id_list = self.get_identify(IdentifyActiveNamespaceList::new())?;
        let mut out = Vec::new();

        for id in id_list {
            let ns = self
                .get_identify(IdentifyNamespaceDataStructure::new(id))?
                .expect("active namespace should have identify data");

            out.push(Namespace {
                id,
                lba_size: ns.lba_size as _,
                lba_count: ns.namespace_size as _,
                metadata_size: ns.metadata_size as _,
            });
        }

        Ok(out)
    }

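    /// Programs the admin queue sizes and the submission/completion queue
    /// base addresses into the controller registers.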
    fn nvme_configure_admin_queue(&mut self) {
        self.reg().set_admin_submission_and_completion_queue_size(
            self.admin_queue.sq.len(),
            self.admin_queue.cq.len(),
        );

        self.reg()
            .set_admin_submission_queue_base_address(self.admin_queue.sq.bus_addr());

        self.reg()
            .set_admin_completion_queue_base_address(self.admin_queue.cq.bus_addr());
    }

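    /// Requests the desired queue count via Set Features, then creates each
    /// I/O completion/submission queue pair through admin commands.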
    fn config_io_queue(&mut self, config: Config) -> Result {
        let num = config.io_queue_pair_count;
        // Number of Queues is zero-based: a value of N requests N + 1 queues.
        let cmd = CommandSet::set_features(Feature::NumberOfQueues {
            nsq: num as u32 - 1,
            ncq: num as u32 - 1,
        });
        self.admin_queue.command_sync(cmd)?;

        for i in 0..num {
            let id = (i + 1) as u32;
            let io_queue = NvmeQueue::new(
                id,
                self.bar,
                config.page_size,
                2usize.pow(self.sqes as _),
                2usize.pow(self.cqes as _),
            )?;

            // The completion queue must exist before the submission queue
            // that targets it is created.
            let data = CommandSet::create_io_completion_queue(
                io_queue.qid,
                io_queue.cq.len() as _,
                io_queue.cq.bus_addr(),
                true,
                false,
                0,
            );
            self.admin_queue.command_sync(data)?;

            let data = CommandSet::create_io_submission_queue(
                io_queue.qid,
                io_queue.sq.len() as _,
                io_queue.sq.bus_addr(),
                true,
                0,
                io_queue.qid,
                0,
            );

            self.admin_queue.command_sync(data)?;

            self.io_queues.push(io_queue);
        }

        Ok(())
    }

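    /// Issues an Identify admin command for `want`'s CNS value into a DMA
    /// buffer and parses the returned data structure.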
    pub fn get_identify<T: Identify>(&mut self, mut want: T) -> Result<T::Output> {
        let cmd = want.command_set_mut();

        cmd.cdw0 = CommandSet::cdw0_from_opcode(command::Opcode::IDENTIFY);
        cmd.cdw10 = T::CNS;

        // Identify data is returned in a 4 KiB buffer.
        let buff = DVec::zeros(0x1000, 0x1000, Direction::FromDevice).ok_or(Error::NoMemory)?;
        cmd.prp1 = buff.bus_addr();

        self.admin_queue.command_sync(*cmd)?;

        let res = want.parse(&buff);
        Ok(res)
    }

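    /// Writes whole blocks to `ns` starting at LBA `block_start`, blocking
    /// until the command completes. `buff.len()` must be a multiple of the
    /// namespace LBA size.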
    pub fn block_write_sync(
        &mut self,
        ns: &Namespace,
        block_start: u64,
        buff: &[u8],
    ) -> Result<()> {
        assert!(
            buff.len() % ns.lba_size == 0,
            "buffer size must be a multiple of the LBA size"
        );

        let buff = DSlice::from(buff);

        let blk_num = buff.len() / ns.lba_size;

        let cmd = CommandSet::nvm_cmd_write(ns.id, buff.bus_addr(), block_start, blk_num as _);

        self.io_queues[0].command_sync(cmd)?;

        Ok(())
    }

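    /// Reads whole blocks from `ns` starting at LBA `block_start`, blocking
    /// until the command completes. A sketch, assuming a 512-byte LBA size:
    ///
    /// ```ignore
    /// let mut buf = [0u8; 512];
    /// nvme.block_read_sync(&ns, 0, &mut buf)?;
    /// ```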
    pub fn block_read_sync(
        &mut self,
        ns: &Namespace,
        block_start: u64,
        buff: &mut [u8],
    ) -> Result<()> {
        assert!(
            buff.len() % ns.lba_size == 0,
            "buffer size must be a multiple of the LBA size"
        );

        let buff = DSliceMut::from(buff, Direction::FromDevice);

        let blk_num = buff.len() / ns.lba_size;

        let cmd = CommandSet::nvm_cmd_read(ns.id, buff.bus_addr(), block_start, blk_num as _);

        self.io_queues[0].command_sync(cmd)?;

        // Make the device-written data visible to the CPU before returning.
        buff.preper_read_all();
        Ok(())
    }

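    /// Returns the controller version as (major, minor, tertiary).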
    pub fn version(&self) -> (usize, usize, usize) {
        self.reg().version()
    }

    fn reg(&self) -> &NvmeReg {
        unsafe { self.bar.as_ref() }
    }
}

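/// Geometry of a single namespace, as reported by Identify Namespace.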
#[derive(Debug, Clone, Copy)]
pub struct Namespace {
    pub id: u32,
    pub lba_size: usize,
    pub lba_count: usize,
    pub metadata_size: usize,
}